forked from barak/tarpoon
Add glide.yaml and vendor deps
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

44   vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD   (generated, vendored, Normal file)
@@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "host_path.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/conversion:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/uuid:go_default_library",
        "//pkg/volume:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["host_path_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util:go_default_library",
        "//pkg/util/uuid:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)

3   vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS   (generated, vendored, Normal file)
@@ -0,0 +1,3 @@
maintainers:
- saad-ali
- thockin

19   vendor/k8s.io/kubernetes/pkg/volume/host_path/doc.go   (generated, vendored, Normal file)
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package host_path contains the internal representation of hostPath
// volumes.
package host_path // import "k8s.io/kubernetes/pkg/volume/host_path"

338   vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go   (generated, vendored, Normal file)
@@ -0,0 +1,338 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package host_path

import (
    "fmt"
    "os"
    "regexp"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/conversion"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
// The volumeConfig arg provides the ability to configure volume behavior. It is implemented as a pointer to allow nils.
// The hostPathPlugin is used to store the volumeConfig and give it, when needed, to the func that creates HostPath Recyclers.
// Tests that exercise recycling should not use this func but instead use ProbeRecyclablePlugins() to override default behavior.
func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
    return []volume.VolumePlugin{
        &hostPathPlugin{
            host:   nil,
            config: volumeConfig,
        },
    }
}

type hostPathPlugin struct {
    host   volume.VolumeHost
    config volume.VolumeConfig
}

var _ volume.VolumePlugin = &hostPathPlugin{}
var _ volume.PersistentVolumePlugin = &hostPathPlugin{}
var _ volume.RecyclableVolumePlugin = &hostPathPlugin{}
var _ volume.DeletableVolumePlugin = &hostPathPlugin{}
var _ volume.ProvisionableVolumePlugin = &hostPathPlugin{}

const (
    hostPathPluginName = "kubernetes.io/host-path"
)

func (plugin *hostPathPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}

func (plugin *hostPathPlugin) GetPluginName() string {
    return hostPathPluginName
}

func (plugin *hostPathPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return volumeSource.Path, nil
}

func (plugin *hostPathPlugin) CanSupport(spec *volume.Spec) bool {
    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath != nil) ||
        (spec.Volume != nil && spec.Volume.HostPath != nil)
}

func (plugin *hostPathPlugin) RequiresRemount() bool {
    return false
}

func (plugin *hostPathPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
    return []v1.PersistentVolumeAccessMode{
        v1.ReadWriteOnce,
    }
}

func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    hostPathVolumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }
    return &hostPathMounter{
        hostPath: &hostPath{path: hostPathVolumeSource.Path},
        readOnly: readOnly,
    }, nil
}

func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    return &hostPathUnmounter{&hostPath{
        path: "",
    }}, nil
}

func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
    return newRecycler(pvName, spec, eventRecorder, plugin.host, plugin.config)
}

func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
    return newDeleter(spec, plugin.host)
}

func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
    if !plugin.config.ProvisioningEnabled {
        return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName())
    }
    return newProvisioner(options, plugin.host, plugin)
}

func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
    hostPathVolume := &v1.Volume{
        Name: volumeName,
        VolumeSource: v1.VolumeSource{
            HostPath: &v1.HostPathVolumeSource{
                Path: volumeName,
            },
        },
    }
    return volume.NewSpecFromVolume(hostPathVolume), nil
}

func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
    if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
        return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
    }
    path := spec.PersistentVolume.Spec.HostPath.Path
    return &hostPathRecycler{
        name:          spec.Name(),
        path:          path,
        host:          host,
        config:        config,
        timeout:       volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
        pvName:        pvName,
        eventRecorder: eventRecorder,
    }, nil
}

func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath == nil {
        return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
    }
    path := spec.PersistentVolume.Spec.HostPath.Path
    return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
}

func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) {
    return &hostPathProvisioner{options: options, host: host, plugin: plugin}, nil
}

// HostPath volumes represent a bare host file or directory mount.
// The directory at the specified path will be directly exposed to the container.
type hostPath struct {
    path string
    volume.MetricsNil
}

func (hp *hostPath) GetPath() string {
    return hp.path
}

type hostPathMounter struct {
    *hostPath
    readOnly bool
}

var _ volume.Mounter = &hostPathMounter{}

func (b *hostPathMounter) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        b.readOnly,
        Managed:         false,
        SupportsSELinux: false,
    }
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *hostPathMounter) CanMount() error {
    return nil
}

// SetUp does nothing.
func (b *hostPathMounter) SetUp(fsGroup *int64) error {
    return nil
}

// SetUpAt does not make sense for host paths - probably programmer error.
func (b *hostPathMounter) SetUpAt(dir string, fsGroup *int64) error {
    return fmt.Errorf("SetUpAt() does not make sense for host paths")
}

func (b *hostPathMounter) GetPath() string {
    return b.path
}

type hostPathUnmounter struct {
    *hostPath
}

var _ volume.Unmounter = &hostPathUnmounter{}

// TearDown does nothing.
func (c *hostPathUnmounter) TearDown() error {
    return nil
}

// TearDownAt does not make sense for host paths - probably programmer error.
func (c *hostPathUnmounter) TearDownAt(dir string) error {
    return fmt.Errorf("TearDownAt() does not make sense for host paths")
}

// hostPathRecycler implements a Recycler for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster
type hostPathRecycler struct {
    name    string
    path    string
    host    volume.VolumeHost
    config  volume.VolumeConfig
    timeout int64
    volume.MetricsNil
    pvName        string
    eventRecorder volume.RecycleEventRecorder
}

func (r *hostPathRecycler) GetPath() string {
    return r.path
}

// Recycle recycles/scrubs clean a HostPath volume.
// Recycle blocks until the pod has completed or any error occurs.
// HostPath recycling only works in single node clusters and is meant for testing purposes only.
func (r *hostPathRecycler) Recycle() error {
    templateClone, err := conversion.NewCloner().DeepCopy(r.config.RecyclerPodTemplate)
    if err != nil {
        return err
    }
    pod := templateClone.(*v1.Pod)
    // overrides
    pod.Spec.ActiveDeadlineSeconds = &r.timeout
    pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
        HostPath: &v1.HostPathVolumeSource{
            Path: r.path,
        },
    }
    return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}

// hostPathProvisioner implements a Provisioner for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster.
type hostPathProvisioner struct {
    host    volume.VolumeHost
    options volume.VolumeOptions
    plugin  *hostPathPlugin
}

// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
    fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

    capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: r.options.PVName,
            Annotations: map[string]string{
                "kubernetes.io/createdby": "hostpath-dynamic-provisioner",
            },
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   r.options.PVC.Spec.AccessModes,
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): capacity,
            },
            PersistentVolumeSource: v1.PersistentVolumeSource{
                HostPath: &v1.HostPathVolumeSource{
                    Path: fullpath,
                },
            },
        },
    }
    if len(r.options.PVC.Spec.AccessModes) == 0 {
        pv.Spec.AccessModes = r.plugin.GetAccessModes()
    }

    return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}

// hostPathDeleter deletes a hostPath PV from the cluster.
// This deleter only works on a single host cluster and is for testing purposes only.
type hostPathDeleter struct {
    name string
    path string
    host volume.VolumeHost
    volume.MetricsNil
}

func (r *hostPathDeleter) GetPath() string {
    return r.path
}

// Delete for hostPath removes the local directory so long as it is beneath /tmp/*.
// THIS IS FOR TESTING AND LOCAL DEVELOPMENT ONLY! This message should scare you away from using
// this deleter for anything other than development and testing.
func (r *hostPathDeleter) Delete() error {
    regexp := regexp.MustCompile("/tmp/.+")
    if !regexp.MatchString(r.GetPath()) {
        return fmt.Errorf("host_path deleter only supports /tmp/.+ but received provided %s", r.GetPath())
    }
    return os.RemoveAll(r.GetPath())
}

func getVolumeSource(
    spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
    if spec.Volume != nil && spec.Volume.HostPath != nil {
        return spec.Volume.HostPath, spec.ReadOnly, nil
    } else if spec.PersistentVolume != nil &&
        spec.PersistentVolume.Spec.HostPath != nil {
        return spec.PersistentVolume.Spec.HostPath, spec.ReadOnly, nil
    }

    return nil, false, fmt.Errorf("Spec does not reference an HostPath volume type")
}

276   vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go   (generated, vendored, Normal file)
@@ -0,0 +1,276 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package host_path

import (
    "fmt"
    "os"
    "testing"

    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util"
    "k8s.io/kubernetes/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != "kubernetes.io/host-path" {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }
    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
    if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
        t.Errorf("Expected false")
    }
}

func TestGetAccessModes(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != v1.ReadWriteOnce {
        t.Errorf("Expected %s PersistentVolumeAccessMode", v1.ReadWriteOnce)
    }
}

func TestRecycler(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    pluginHost := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, pluginHost)

    spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
    plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    recycler, err := plug.NewRecycler("pv-name", spec, nil)
    if err != nil {
        t.Errorf("Failed to make a new Recycler: %v", err)
    }
    if recycler.GetPath() != spec.PersistentVolume.Spec.HostPath.Path {
        t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.HostPath.Path, recycler.GetPath())
    }
}

func TestDeleter(t *testing.T) {
    // Deleter has a hard-coded regex for "/tmp".
    tempPath := fmt.Sprintf("/tmp/hostpath/%s", uuid.NewUUID())
    defer os.RemoveAll(tempPath)
    err := os.MkdirAll(tempPath, 0750)
    if err != nil {
        t.Fatalf("Failed to create tmp directory for deleter: %v", err)
    }

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))

    spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
    plug, err := plugMgr.FindDeletablePluginBySpec(spec)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    deleter, err := plug.NewDeleter(spec)
    if err != nil {
        t.Errorf("Failed to make a new Deleter: %v", err)
    }
    if deleter.GetPath() != tempPath {
        t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
    }
    if err := deleter.Delete(); err != nil {
        t.Errorf("Mock Recycler expected to return nil but got %s", err)
    }
    if exists, _ := util.FileExists("foo"); exists {
        t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
    }
}

func TestDeleterTempDir(t *testing.T) {
    tests := map[string]struct {
        expectedFailure bool
        path            string
    }{
        "just-tmp": {true, "/tmp"},
        "not-tmp":  {true, "/nottmp"},
        "good-tmp": {false, "/tmp/scratch"},
    }

    for name, test := range tests {
        plugMgr := volume.VolumePluginMgr{}
        plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
        spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
        plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
        deleter, _ := plug.NewDeleter(spec)
        err := deleter.Delete()
        if err == nil && test.expectedFailure {
            t.Errorf("Expected failure for test '%s' but got nil err", name)
        }
        if err != nil && !test.expectedFailure {
            t.Errorf("Unexpected failure for test '%s': %v", name, err)
        }
    }
}

func TestProvisioner(t *testing.T) {
    tempPath := fmt.Sprintf("/tmp/hostpath/%s", uuid.NewUUID())
    defer os.RemoveAll(tempPath)
    err := os.MkdirAll(tempPath, 0750)

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
        volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
    spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
    plug, err := plugMgr.FindCreatablePluginBySpec(spec)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    options := volume.VolumeOptions{
        PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
    }
    creater, err := plug.NewProvisioner(options)
    if err != nil {
        t.Errorf("Failed to make a new Provisioner: %v", err)
    }
    pv, err := creater.Provision()
    if err != nil {
        t.Errorf("Unexpected error creating volume: %v", err)
    }
    if pv.Spec.HostPath.Path == "" {
        t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv)
    }
    expectedCapacity := resource.NewQuantity(1*1024*1024*1024, resource.BinarySI)
    actualCapacity := pv.Spec.Capacity[v1.ResourceStorage]
    expectedAmt := expectedCapacity.Value()
    actualAmt := actualCapacity.Value()
    if expectedAmt != actualAmt {
        t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt)
    }

    if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
        t.Errorf("Expected reclaim policy %+v but got %+v", v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
    }

    os.RemoveAll(pv.Spec.HostPath.Path)
}

func TestPlugin(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name:         "vol1",
        VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/vol1"}},
    }
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    path := mounter.GetPath()
    if path != "/vol1" {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }

    unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                HostPath: &v1.HostPathVolumeSource{Path: "foo"},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }

    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }

    client := fake.NewSimpleClientset(pv, claim)

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
    plug, _ := plugMgr.FindPluginByName(hostPathPluginName)

    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}