forked from barak/tarpoon
Add glide.yaml and vendor deps
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
78 vendor/k8s.io/kubernetes/pkg/volume/BUILD generated vendored Normal file
@@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "metrics_cached.go",
        "metrics_du.go",
        "metrics_errors.go",
        "metrics_nil.go",
        "metrics_statfs.go",
        "plugins.go",
        "util.go",
        "volume.go",
        "volume_linux.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/errors:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/fields:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/chmod:go_default_library",
        "//pkg/util/chown:go_default_library",
        "//pkg/util/errors:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/sets:go_default_library",
        "//pkg/util/validation:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/watch:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "metrics_nil_test.go",
        "metrics_statfs_test.go",
        "plugins_test.go",
        "util_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/errors:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/watch:go_default_library",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = ["metrics_du_test.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)

5 vendor/k8s.io/kubernetes/pkg/volume/OWNERS generated vendored Normal file
@@ -0,0 +1,5 @@
assignees:
- saad-ali
- thockin
- pmorie
- jsafrane

56 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD generated vendored Normal file
@@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "aws_ebs.go",
        "aws_util.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "aws_ebs_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

3 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS generated vendored Normal file
@@ -0,0 +1,3 @@
maintainers:
- jsafrane
- justinsb

250 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go generated vendored Normal file
@@ -0,0 +1,250 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "os"
    "path"
    "strconv"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type awsElasticBlockStoreAttacher struct {
    host       volume.VolumeHost
    awsVolumes aws.Volumes
}

var _ volume.Attacher = &awsElasticBlockStoreAttacher{}

var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}

func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
    awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        return nil, err
    }

    return &awsElasticBlockStoreAttacher{
        host:       plugin.host,
        awsVolumes: awsCloud,
    }, nil
}

func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
    mounter := plugin.host.GetMounter()
    return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID)

    // awsCloud.AttachDisk checks if disk is already attached to node and
    // succeeds in that case, so no need to do that separately.
    devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
    if err != nil {
        glog.Errorf("Error attaching volume %q: %+v", volumeID, err)
        return "", err
    }

    return devicePath, nil
}

func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
    volumesAttachedCheck := make(map[*volume.Spec]bool)
    volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec)
    volumeIDList := []aws.KubernetesVolumeID{}
    for _, spec := range specs {
        volumeSource, _, err := getVolumeSource(spec)
        if err != nil {
            glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
            continue
        }

        name := aws.KubernetesVolumeID(volumeSource.VolumeID)
        volumeIDList = append(volumeIDList, name)
        volumesAttachedCheck[spec] = true
        volumeSpecMap[name] = spec
    }
    attachedResult, err := attacher.awsVolumes.DisksAreAttached(volumeIDList, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "Error checking if volumes (%v) is already attached to current node (%q). err=%v",
            volumeIDList, nodeName, err)
        return volumesAttachedCheck, err
    }

    for volumeID, attached := range attachedResult {
        if !attached {
            spec := volumeSpecMap[volumeID]
            volumesAttachedCheck[spec] = false
            glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
        }
    }
    return volumesAttachedCheck, nil
}

func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    volumeID := volumeSource.VolumeID
    partition := ""
    if volumeSource.Partition != 0 {
        partition = strconv.Itoa(int(volumeSource.Partition))
    }

    if devicePath == "" {
        return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID)
    }

    ticker := time.NewTicker(checkSleepDuration)
    defer ticker.Stop()
    timer := time.NewTimer(timeout)
    defer timer.Stop()

    for {
        select {
        case <-ticker.C:
            glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
            if devicePath != "" {
                devicePaths := getDiskByIdPaths(partition, devicePath)
                path, err := verifyDevicePath(devicePaths)
                if err != nil {
                    // Log error, if any, and continue checking periodically. See issue #11321
                    glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
                } else if path != "" {
                    // A device path has successfully been created for the PD
                    glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
                    return path, nil
                }
            } else {
                glog.V(5).Infof("AWS Volume (%q) is not attached yet", volumeID)
            }
        case <-timer.C:
            return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
        }
    }
}

func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
    spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil
}

// FIXME: this method can be further pruned.
func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
    mounter := attacher.host.GetMounter()
    notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
    if err != nil {
        if os.IsNotExist(err) {
            if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
                return err
            }
            notMnt = true
        } else {
            return err
        }
    }

    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return err
    }

    options := []string{}
    if readOnly {
        options = append(options, "ro")
    }
    if notMnt {
        diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
        err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
        if err != nil {
            os.Remove(deviceMountPath)
            return err
        }
    }
    return nil
}

type awsElasticBlockStoreDetacher struct {
    mounter    mount.Interface
    awsVolumes aws.Volumes
}

var _ volume.Detacher = &awsElasticBlockStoreDetacher{}

func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
    awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        return nil, err
    }

    return &awsElasticBlockStoreDetacher{
        mounter:    plugin.host.GetMounter(),
        awsVolumes: awsCloud,
    }, nil
}

func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
    volumeID := aws.KubernetesVolumeID(path.Base(deviceMountPath))

    attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, nodeName)
    if err != nil {
        // Log error and continue with detach
        glog.Errorf(
            "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
            volumeID, nodeName, err)
    }

    if err == nil && !attached {
        // Volume is already detached from node.
        glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
        return nil
    }

    if _, err = detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
        glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
        return err
    }
    return nil
}

func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error {
    return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}

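Aside: WaitForAttach above polls with a time.Ticker and bounds the wait with a time.Timer in a single select loop, rather than sleeping in a for loop. The following standalone sketch isolates that pattern; the probe function, call counts, and device path are hypothetical, not part of this commit.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitFor polls probe() every interval until it returns a non-empty
    // result or the timeout elapses, mirroring the select over ticker.C
    // and timer.C in WaitForAttach.
    func waitFor(probe func() (string, error), interval, timeout time.Duration) (string, error) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        timer := time.NewTimer(timeout)
        defer timer.Stop()

        for {
            select {
            case <-ticker.C:
                path, err := probe()
                if err != nil {
                    // Transient errors are logged and polling continues,
                    // as WaitForAttach does.
                    fmt.Println("probe error:", err)
                } else if path != "" {
                    return path, nil
                }
            case <-timer.C:
                return "", errors.New("timed out waiting for device")
            }
        }
    }

    func main() {
        calls := 0
        // Hypothetical probe that "finds" the device on the third poll.
        probe := func() (string, error) {
            calls++
            if calls < 3 {
                return "", nil
            }
            return "/dev/xvdf", nil
        }
        path, err := waitFor(probe, 10*time.Millisecond, time.Second)
        fmt.Println(path, err)
    }

Using a stopped Ticker and Timer (both deferred) avoids leaking the goroutines a bare time.After in a loop would create.
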
342 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go generated vendored Normal file
@@ -0,0 +1,342 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "errors"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/types"
)

func TestGetVolumeName_Volume(t *testing.T) {
    plugin := newPlugin()
    name := aws.KubernetesVolumeID("my-aws-volume")
    spec := createVolSpec(name, false)

    volumeName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetVolumeName error: %v", err)
    }
    if volumeName != string(name) {
        t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
    }
}

func TestGetVolumeName_PersistentVolume(t *testing.T) {
    plugin := newPlugin()
    name := aws.KubernetesVolumeID("my-aws-pv")
    spec := createPVSpec(name, true)

    volumeName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetVolumeName error: %v", err)
    }
    if volumeName != string(name) {
        t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
    }
}

// One testcase for TestAttachDetach table test below
type testcase struct {
    name aws.KubernetesVolumeID
    // For fake AWS:
    attach         attachCall
    detach         detachCall
    diskIsAttached diskIsAttachedCall
    t              *testing.T

    // Actual test to run
    test func(test *testcase) (string, error)
    // Expected return of the test
    expectedDevice string
    expectedError  error
}

func TestAttachDetach(t *testing.T) {
    diskName := aws.KubernetesVolumeID("disk")
    nodeName := types.NodeName("instance")
    readOnly := false
    spec := createVolSpec(diskName, readOnly)
    attachError := errors.New("Fake attach error")
    detachError := errors.New("Fake detach error")
    diskCheckError := errors.New("Fake DiskIsAttached error")
    tests := []testcase{
        // Successful Attach call
        {
            name:   "Attach_Positive",
            attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedDevice: "/dev/sda",
        },

        // Attach call fails
        {
            name:   "Attach_Negative",
            attach: attachCall{diskName, nodeName, readOnly, "", attachError},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedError: attachError,
        },

        // Detach succeeds
        {
            name:           "Detach_Positive",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
            detach:         detachCall{diskName, nodeName, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                mountPath := "/mnt/" + string(diskName)
                return "", detacher.Detach(mountPath, nodeName)
            },
        },

        // Disk is already detached
        {
            name:           "Detach_Positive_AlreadyDetached",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                mountPath := "/mnt/" + string(diskName)
                return "", detacher.Detach(mountPath, nodeName)
            },
        },

        // Detach succeeds when DiskIsAttached fails
        {
            name:           "Detach_Positive_CheckFails",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            detach:         detachCall{diskName, nodeName, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                mountPath := "/mnt/" + string(diskName)
                return "", detacher.Detach(mountPath, nodeName)
            },
        },

        // Detach fails
        {
            name:           "Detach_Negative",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            detach:         detachCall{diskName, nodeName, "", detachError},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                mountPath := "/mnt/" + string(diskName)
                return "", detacher.Detach(mountPath, nodeName)
            },
            expectedError: detachError,
        },
    }

    for _, testcase := range tests {
        testcase.t = t
        device, err := testcase.test(&testcase)
        if err != testcase.expectedError {
            t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError.Error(), err.Error())
        }
        if device != testcase.expectedDevice {
            t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
        }
        t.Logf("Test %q succeeded", testcase.name)
    }
}

// newPlugin creates a new gcePersistentDiskPlugin with fake cloud, NewAttacher
// and NewDetacher won't work.
func newPlugin() *awsElasticBlockStorePlugin {
    host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
    plugins := ProbeVolumePlugins()
    plugin := plugins[0]
    plugin.Init(host)
    return plugin.(*awsElasticBlockStorePlugin)
}

func newAttacher(testcase *testcase) *awsElasticBlockStoreAttacher {
    return &awsElasticBlockStoreAttacher{
        host:       nil,
        awsVolumes: testcase,
    }
}

func newDetacher(testcase *testcase) *awsElasticBlockStoreDetacher {
    return &awsElasticBlockStoreDetacher{
        awsVolumes: testcase,
    }
}

func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
    return &volume.Spec{
        Volume: &v1.Volume{
            VolumeSource: v1.VolumeSource{
                AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                    VolumeID: string(name),
                    ReadOnly: readOnly,
                },
            },
        },
    }
}

func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
    return &volume.Spec{
        PersistentVolume: &v1.PersistentVolume{
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                        VolumeID: string(name),
                        ReadOnly: readOnly,
                    },
                },
            },
        },
    }
}

// Fake AWS implementation

type attachCall struct {
    diskName      aws.KubernetesVolumeID
    nodeName      types.NodeName
    readOnly      bool
    retDeviceName string
    ret           error
}

type detachCall struct {
    diskName      aws.KubernetesVolumeID
    nodeName      types.NodeName
    retDeviceName string
    ret           error
}

type diskIsAttachedCall struct {
    diskName   aws.KubernetesVolumeID
    nodeName   types.NodeName
    isAttached bool
    ret        error
}

func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
    expected := &testcase.attach

    if expected.diskName == "" && expected.nodeName == "" {
        // testcase.attach looks uninitialized, test did not expect to call
        // AttachDisk
        testcase.t.Errorf("Unexpected AttachDisk call!")
        return "", errors.New("Unexpected AttachDisk call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
        return "", errors.New("Unexpected AttachDisk call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
    }

    if expected.readOnly != readOnly {
        testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
        return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
    }

    glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)

    return expected.retDeviceName, expected.ret
}

func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
    expected := &testcase.detach

    if expected.diskName == "" && expected.nodeName == "" {
        // testcase.detach looks uninitialized, test did not expect to call
        // DetachDisk
        testcase.t.Errorf("Unexpected DetachDisk call!")
        return "", errors.New("Unexpected DetachDisk call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
        return "", errors.New("Unexpected DetachDisk call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
    }

    glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)

    return expected.retDeviceName, expected.ret
}

func (testcase *testcase) DiskIsAttached(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
    expected := &testcase.diskIsAttached

    if expected.diskName == "" && expected.nodeName == "" {
        // testcase.diskIsAttached looks uninitialized, test did not expect to
        // call DiskIsAttached
        testcase.t.Errorf("Unexpected DiskIsAttached call!")
        return false, errors.New("Unexpected DiskIsAttached call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
        return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
    }

    glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)

    return expected.isAttached, expected.ret
}

func (testcase *testcase) DisksAreAttached(diskNames []aws.KubernetesVolumeID, nodeName types.NodeName) (map[aws.KubernetesVolumeID]bool, error) {
    return nil, errors.New("Not implemented")
}

func (testcase *testcase) CreateDisk(volumeOptions *aws.VolumeOptions) (volumeName aws.KubernetesVolumeID, err error) {
    return "", errors.New("Not implemented")
}

func (testcase *testcase) DeleteDisk(volumeName aws.KubernetesVolumeID) (bool, error) {
    return false, errors.New("Not implemented")
}

func (testcase *testcase) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (map[string]string, error) {
    return map[string]string{}, errors.New("Not implemented")
}

func (testcase *testcase) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
    return "", errors.New("Not implemented")
}

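Aside: the test above injects its fake by making the testcase struct itself satisfy the aws.Volumes interface, so each table entry carries both the expected arguments and the canned results. A minimal sketch of that self-faking table-test pattern follows; the interface, type, and test names are hypothetical, not part of this commit.

    package fakesketch

    import (
        "errors"
        "testing"
    )

    // disks is a stand-in for the cloud interface being faked.
    type disks interface {
        Attach(name string) (string, error)
    }

    type fakeCase struct {
        name      string
        wantDisk  string // expected argument
        retDevice string // canned return value
        retErr    error
        t         *testing.T
    }

    // Attach verifies the argument against the expectation and replays the
    // canned result, just like the AttachDisk fake above.
    func (c *fakeCase) Attach(name string) (string, error) {
        if name != c.wantDisk {
            c.t.Errorf("%s: unexpected disk %q, want %q", c.name, name, c.wantDisk)
            return "", errors.New("unexpected Attach call")
        }
        return c.retDevice, c.retErr
    }

    func TestAttachTable(t *testing.T) {
        tests := []fakeCase{
            {name: "positive", wantDisk: "vol-1", retDevice: "/dev/sda"},
            {name: "negative", wantDisk: "vol-1", retErr: errors.New("boom")},
        }
        for i := range tests {
            tc := &tests[i]
            tc.t = t
            var d disks = tc // the case itself is the fake
            dev, err := d.Attach("vol-1")
            if dev != tc.retDevice || err != tc.retErr {
                t.Errorf("%s: got (%q, %v)", tc.name, dev, err)
            }
        }
    }
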
493 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go generated vendored Normal file
@@ -0,0 +1,493 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/util/mount"
    kstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}

type awsElasticBlockStorePlugin struct {
    host volume.VolumeHost
}

var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

const (
    awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
    awsURLNamePrefix               = "aws://"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
    return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}

func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}

func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
    return awsElasticBlockStorePluginName
}

func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return volumeSource.VolumeID, nil
}

func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil) ||
        (spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
}

func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
    return false
}

func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
    return []v1.PersistentVolumeAccessMode{
        v1.ReadWriteOnce,
    }
}

func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    // Inject real implementations here, test through the internal function.
    return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
}

func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
    // EBSs used directly in a pod have a ReadOnly flag set by the pod author.
    // EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
    ebs, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }

    volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
    fsType := ebs.FSType
    partition := ""
    if ebs.Partition != 0 {
        partition = strconv.Itoa(int(ebs.Partition))
    }

    return &awsElasticBlockStoreMounter{
        awsElasticBlockStore: &awsElasticBlockStore{
            podUID:          podUID,
            volName:         spec.Name(),
            volumeID:        volumeID,
            partition:       partition,
            manager:         manager,
            mounter:         mounter,
            plugin:          plugin,
            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
        },
        fsType:      fsType,
        readOnly:    readOnly,
        diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    // Inject real implementations here, test through the internal function.
    return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
}

func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
    return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
        podUID:          podUID,
        volName:         volName,
        manager:         manager,
        mounter:         mounter,
        plugin:          plugin,
        MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
    }}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
    return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}

func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
        glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
        return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
    }
    return &awsElasticBlockStoreDeleter{
        awsElasticBlockStore: &awsElasticBlockStore{
            volName:  spec.Name(),
            volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
            manager:  manager,
            plugin:   plugin,
        }}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
    return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}

func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
    return &awsElasticBlockStoreProvisioner{
        awsElasticBlockStore: &awsElasticBlockStore{
            manager: manager,
            plugin:  plugin,
        },
        options: options,
    }, nil
}

func getVolumeSource(
    spec *volume.Spec) (*v1.AWSElasticBlockStoreVolumeSource, bool, error) {
    if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
        return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
    } else if spec.PersistentVolume != nil &&
        spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
        return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
    }

    return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
}

func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
    mounter := plugin.host.GetMounter()
    pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
    volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
    if err != nil {
        return nil, err
    }
    // This is a workaround to fix the issue in converting aws volume id from globalPDPath
    // There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are:
    // aws:///vol-1234 (aws/vol-1234)
    // aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
    // vol-1234 (vol-1234)
    // This code is for converting volume id to aws style volume id for the first two cases.
    sourceName := volumeID
    if strings.HasPrefix(volumeID, "aws/") {
        names := strings.Split(volumeID, "/")
        length := len(names)
        if length < 2 || length > 3 {
            return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
        }
        volName := names[length-1]
        if !strings.HasPrefix(volName, "vol-") {
            return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
        }
        if length == 2 {
            sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
        }
        if length == 3 {
            sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
        }
        glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
    }

    awsVolume := &v1.Volume{
        Name: volName,
        VolumeSource: v1.VolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: sourceName,
            },
        },
    }
    return volume.NewSpecFromVolume(awsVolume), nil
}

// Abstract interface to PD operations.
type ebsManager interface {
    CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error)
    // Deletes a volume
    DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}

// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
type awsElasticBlockStore struct {
    volName string
    podUID  types.UID
    // Unique id of the PD, used to find the disk resource in the provider.
    volumeID aws.KubernetesVolumeID
    // Specifies the partition to mount
    partition string
    // Utility interface that provides API calls to the provider to attach/detach disks.
    manager ebsManager
    // Mounter interface that provides system calls to mount the global path to the pod local path.
    mounter mount.Interface
    plugin  *awsElasticBlockStorePlugin
    volume.MetricsProvider
}

type awsElasticBlockStoreMounter struct {
    *awsElasticBlockStore
    // Filesystem type, optional.
    fsType string
    // Specifies whether the disk will be attached as read-only.
    readOnly bool
    // diskMounter provides the interface that is used to mount the actual block device.
    diskMounter *mount.SafeFormatAndMount
}

var _ volume.Mounter = &awsElasticBlockStoreMounter{}

func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        b.readOnly,
        Managed:         !b.readOnly,
        SupportsSELinux: true,
    }
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *awsElasticBlockStoreMounter) CanMount() error {
    return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
    // TODO: handle failed mounts here.
    notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
    glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
    if err != nil && !os.IsNotExist(err) {
        glog.Errorf("cannot validate mount point: %s %v", dir, err)
        return err
    }
    if !notMnt {
        return nil
    }

    globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)

    if err := os.MkdirAll(dir, 0750); err != nil {
        return err
    }

    // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
    options := []string{"bind"}
    if b.readOnly {
        options = append(options, "ro")
    }
    err = b.mounter.Mount(globalPDPath, dir, "", options)
    if err != nil {
        notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
        if mntErr != nil {
            glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
            return err
        }
        if !notMnt {
            if mntErr = b.mounter.Unmount(dir); mntErr != nil {
                glog.Errorf("failed to unmount %s: %v", dir, mntErr)
                return err
            }
            notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
            if mntErr != nil {
                glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
                return err
            }
            if !notMnt {
                // This is very odd, we don't expect it. We'll try again next sync loop.
                glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
                return err
            }
        }
        os.Remove(dir)
        glog.Errorf("Mount of disk %s failed: %v", dir, err)
        return err
    }

    if !b.readOnly {
        volume.SetVolumeOwnership(b, fsGroup)
    }

    glog.V(4).Infof("Successfully mounted %s", dir)
    return nil
}

func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
    // Clean up the URI to be more fs-friendly
    name := string(volumeID)
    name = strings.Replace(name, "://", "/", -1)
    return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}

// Reverses the mapping done in makeGlobalPDPath
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
    basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
    rel, err := filepath.Rel(basePath, globalPath)
    if err != nil {
        glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
        return "", err
    }
    if strings.Contains(rel, "../") {
        glog.Errorf("Unexpected mount path: %s", globalPath)
        return "", fmt.Errorf("unexpected mount path: " + globalPath)
    }
    // Reverse the :// replacement done in makeGlobalPDPath
    volumeID := rel
    if strings.HasPrefix(volumeID, "aws/") {
        volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
    }
    glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
    return volumeID, nil
}

func (ebs *awsElasticBlockStore) GetPath() string {
    return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}

type awsElasticBlockStoreUnmounter struct {
    *awsElasticBlockStore
}

var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
    return c.TearDownAt(c.GetPath())
}

// Unmounts the bind mount
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
    notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
    if err != nil {
        glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
        return err
    }
    if notMnt {
        glog.V(2).Info("Not mountpoint, deleting")
        return os.Remove(dir)
    }

    // Unmount the bind-mount inside this pod
    if err := c.mounter.Unmount(dir); err != nil {
        glog.V(2).Info("Error unmounting dir ", dir, ": ", err)
        return err
    }
    notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
    if mntErr != nil {
        glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
        return err
    }
    if notMnt {
        if err := os.Remove(dir); err != nil {
            glog.V(2).Info("Error removing mountpoint ", dir, ": ", err)
            return err
        }
    }
    return nil
}

type awsElasticBlockStoreDeleter struct {
    *awsElasticBlockStore
}

var _ volume.Deleter = &awsElasticBlockStoreDeleter{}

func (d *awsElasticBlockStoreDeleter) GetPath() string {
    return getPath(d.podUID, d.volName, d.plugin.host)
}

func (d *awsElasticBlockStoreDeleter) Delete() error {
    return d.manager.DeleteVolume(d)
}

type awsElasticBlockStoreProvisioner struct {
    *awsElasticBlockStore
    options   volume.VolumeOptions
    namespace string
}

var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
    volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
    if err != nil {
        glog.Errorf("Provision failed: %v", err)
        return nil, err
    }

    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name:   c.options.PVName,
            Labels: map[string]string{},
            Annotations: map[string]string{
                "kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
            },
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   c.options.PVC.Spec.AccessModes,
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
            },
            PersistentVolumeSource: v1.PersistentVolumeSource{
                AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                    VolumeID:  string(volumeID),
                    FSType:    "ext4",
                    Partition: 0,
                    ReadOnly:  false,
                },
            },
        },
    }

    if len(c.options.PVC.Spec.AccessModes) == 0 {
        pv.Spec.AccessModes = c.plugin.GetAccessModes()
    }

    if len(labels) != 0 {
        if pv.Labels == nil {
            pv.Labels = make(map[string]string)
        }
        for k, v := range labels {
            pv.Labels[k] = v
        }
    }

    return pv, nil
}

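Aside: the volume-ID normalization inside ConstructVolumeSpec above is easiest to see in isolation. The sketch below extracts it as a standalone function covering the same three input formats its comment lists; the function name and sample IDs are illustrative, not part of this commit.

    package main

    import (
        "fmt"
        "strings"
    )

    // normalize rebuilds the aws:// form from the path-flattened ID that
    // GetDeviceNameFromMount returns: "aws/vol-1" -> "aws:///vol-1" (empty
    // zone), "aws/us-east-1/vol-1" -> "aws://us-east-1/vol-1"; plain IDs
    // pass through unchanged.
    func normalize(id string) (string, error) {
        if !strings.HasPrefix(id, "aws/") {
            return id, nil // already a bare volume ID, e.g. "vol-1234"
        }
        parts := strings.Split(id, "/")
        if len(parts) < 2 || len(parts) > 3 {
            return "", fmt.Errorf("invalid volume name format %q", id)
        }
        vol := parts[len(parts)-1]
        if !strings.HasPrefix(vol, "vol-") {
            return "", fmt.Errorf("invalid volume name %q", vol)
        }
        zone := "" // empty zone label for the two-part form
        if len(parts) == 3 {
            zone = parts[1]
        }
        return "aws://" + zone + "/" + vol, nil
    }

    func main() {
        for _, id := range []string{"vol-1234", "aws/vol-1234", "aws/us-east-1/vol-1234"} {
            out, err := normalize(id)
            fmt.Println(id, "->", out, err)
        }
    }
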
298 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go generated vendored Normal file
@@ -0,0 +1,298 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "os"
    "path"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    utiltesting "k8s.io/kubernetes/pkg/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }
    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
}

func TestGetAccessModes(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) {
        t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
    }
    if contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
        t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
    }
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
    for _, m := range modes {
        if m == mode {
            return true
        }
    }
    return false
}

type fakePDManager struct {
}

// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error) {
    labels = make(map[string]string)
    labels["fakepdmanager"] = "yes"
    return "test-aws-volume-name", 100, labels, nil
}

func (fake *fakePDManager) DeleteVolume(cd *awsElasticBlockStoreDeleter) error {
    if cd.volumeID != "test-aws-volume-name" {
        return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
    }
    return nil
}

func TestPlugin(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }
    fakeManager := &fakePDManager{}
    fakeMounter := &mount.FakeMounter{}
    mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    fakeManager = &fakePDManager{}
    unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("SetUp() failed: %v", err)
    }

    // Test Provisioner
    options := volume.VolumeOptions{
        PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
    }
    provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
    persistentSpec, err := provisioner.Provision()
    if err != nil {
        t.Errorf("Provision() failed: %v", err)
    }

    if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
        t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
    }
    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
    size := cap.Value()
    if size != 100*1024*1024*1024 {
        t.Errorf("Provision() returned unexpected volume size: %v", size)
    }

    if persistentSpec.Labels["fakepdmanager"] != "yes" {
        t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
    }

    // Test Deleter
    volSpec := &volume.Spec{
        PersistentVolume: persistentSpec,
    }
    deleter, err := plug.(*awsElasticBlockStorePlugin).newDeleterInternal(volSpec, &fakePDManager{})
    err = deleter.Delete()
    if err != nil {
        t.Errorf("Deleter() failed: %v", err)
    }
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }

    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }

    clientset := fake.NewSimpleClientset(pv, claim)

    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
    plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)

    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}

func TestMounterAndUnmounterTypeAssert(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }

    mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if _, ok := mounter.(volume.Unmounter); ok {
        t.Errorf("Volume Mounter can be type-assert to Unmounter")
    }

    unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if _, ok := unmounter.(volume.Mounter); ok {
        t.Errorf("Volume Unmounter can be type-assert to Mounter")
    }
}

191
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "strconv"
    "strings"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
    diskPartitionSuffix = ""
    diskXVDPath         = "/dev/xvd"
    diskXVDPattern      = "/dev/xvd*"
    maxChecks           = 60
    maxRetries          = 10
    checkSleepDuration  = time.Second
    errorSleepDuration  = 5 * time.Second
)

type AWSDiskUtil struct{}

func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
    cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider())
    if err != nil {
        return err
    }

    deleted, err := cloud.DeleteDisk(d.volumeID)
    if err != nil {
        // AWS cloud provider returns volume.deletedVolumeInUseError when
        // necessary, no handling needed here.
        glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
        return err
    }
    if deleted {
        glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
    } else {
        glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
    }
    return nil
}

// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, error) {
    cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
    if err != nil {
        return "", 0, nil, err
    }

    // AWS volumes don't have Name field, store the name in Name tag
    var tags map[string]string
    if c.options.CloudTags == nil {
        tags = make(map[string]string)
    } else {
        tags = *c.options.CloudTags
    }
    tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

    capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    requestBytes := capacity.Value()
    // AWS works with gigabytes, convert to GiB with rounding up
    requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
    volumeOptions := &aws.VolumeOptions{
        CapacityGB: requestGB,
        Tags:       tags,
        PVCName:    c.options.PVC.Name,
    }
    // Apply Parameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    for k, v := range c.options.Parameters {
        switch strings.ToLower(k) {
        case "type":
            volumeOptions.VolumeType = v
        case "zone":
            volumeOptions.AvailabilityZone = v
        case "iopspergb":
            volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
            if err != nil {
                return "", 0, nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
            }
        case "encrypted":
            volumeOptions.Encrypted, err = strconv.ParseBool(v)
            if err != nil {
                return "", 0, nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
            }
        case "kmskeyid":
            volumeOptions.KmsKeyId = v
        default:
            return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
        }
    }

    // TODO: implement PVC.Selector parsing
    if c.options.PVC.Spec.Selector != nil {
        return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
    }

    name, err := cloud.CreateDisk(volumeOptions)
    if err != nil {
        glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
        return "", 0, nil, err
    }
    glog.V(2).Infof("Successfully created EBS Disk volume %s", name)

    labels, err := cloud.GetVolumeLabels(name)
    if err != nil {
        // We don't really want to leak the volume here...
        glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
    }

    return name, int(requestGB), labels, nil
}

// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string) (string, error) {
    for _, path := range devicePaths {
        if pathExists, err := volumeutil.PathExists(path); err != nil {
            return "", fmt.Errorf("Error checking if path exists: %v", err)
        } else if pathExists {
            return path, nil
        }
    }

    return "", nil
}

// Returns true if none of the given device paths exist any longer.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
    allPathsRemoved := true
    for _, path := range devicePaths {
        if exists, err := volumeutil.PathExists(path); err != nil {
            return false, fmt.Errorf("Error checking if path exists: %v", err)
        } else {
            allPathsRemoved = allPathsRemoved && !exists
        }
    }

    return allPathsRemoved, nil
}

// Returns list of all paths for given EBS mount
// This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
// Here it is mostly about applying the partition path
func getDiskByIdPaths(partition string, devicePath string) []string {
    devicePaths := []string{}
    if devicePath != "" {
        devicePaths = append(devicePaths, devicePath)
    }

    if partition != "" {
        for i, path := range devicePaths {
            devicePaths[i] = path + diskPartitionSuffix + partition
        }
    }

    return devicePaths
}

// Return cloud provider
func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) {
    awsCloudProvider, ok := cloudProvider.(*aws.Cloud)
    if !ok || awsCloudProvider == nil {
        return nil, fmt.Errorf("Failed to get AWS Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
    }

    return awsCloudProvider, nil
}
19
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package aws_ebs contains the internal representation of AWS Elastic
// Block Store volumes.
package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs"
58
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD
generated
vendored
Normal file
@@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "azure_dd.go",
        "azure_provision.go",
        "vhd_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/util/wait:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "azure_dd_test.go",
        "vhd_util_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
    ],
)
282
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go
generated
vendored
Normal file
@@ -0,0 +1,282 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/util/keymutex"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskAttacher struct {
    host          volume.VolumeHost
    azureProvider azureCloudProvider
}

var _ volume.Attacher = &azureDiskAttacher{}

var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}

const (
    checkSleepDuration = time.Second
)

// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()

// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
    azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        glog.V(4).Infof("failed to get azure provider")
        return nil, err
    }

    return &azureDiskAttacher{
        host:          plugin.host,
        azureProvider: azure,
    }, nil
}

// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
    volumeSource, err := getVolumeSource(spec)
    if err != nil {
        glog.Warningf("failed to get azure disk spec")
        return "", err
    }
    instanceid, err := attacher.azureProvider.InstanceID(nodeName)
    if err != nil {
        glog.Warningf("failed to get azure instance id")
        return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
    }
    if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
        instanceid = instanceid[(ind + 1):]
    }

    lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
    if err == cloudprovider.InstanceNotFound {
        // Log error and continue with attach
        glog.Warningf(
            "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
            instanceid, err)
    }

    if err == nil {
        // Volume is already attached to node.
        glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
    } else {
        getLunMutex.LockKey(instanceid)
        defer getLunMutex.UnlockKey(instanceid)

        lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
        if err != nil {
            glog.Warningf("no LUN available for instance %q", nodeName)
            return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
        }

        err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
        if err == nil {
            glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
        } else {
            glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
            return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
        }
    }

    return strconv.Itoa(int(lun)), err
}

func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
    volumesAttachedCheck := make(map[*volume.Spec]bool)
    volumeSpecMap := make(map[string]*volume.Spec)
    volumeIDList := []string{}
    for _, spec := range specs {
        volumeSource, err := getVolumeSource(spec)
        if err != nil {
            glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
            continue
        }

        volumeIDList = append(volumeIDList, volumeSource.DiskName)
        volumesAttachedCheck[spec] = true
        volumeSpecMap[volumeSource.DiskName] = spec
    }
    attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "Error checking if volumes (%v) are attached to current node (%q). err=%v",
            volumeIDList, nodeName, err)
        return volumesAttachedCheck, err
    }

    for volumeID, attached := range attachedResult {
        if !attached {
            spec := volumeSpecMap[volumeID]
            volumesAttachedCheck[spec] = false
            glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
        }
    }
    return volumesAttachedCheck, nil
}

// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
    volumeSource, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    if len(lunStr) == 0 {
        return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
    }

    lun, err := strconv.Atoi(lunStr)
    if err != nil {
        return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
    }
    scsiHostRescan(&osIOHandler{})
    exe := exec.New()
    devicePath := ""

    err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
        glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
        if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
            glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
            return true, nil
        } else {
            //Log error, if any, and continue checking periodically
            glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
            return false, nil
        }
    })
    return devicePath, err
}

// GetDeviceMountPath finds the volume's mount path on the node
func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
    volumeSource, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil
}

// MountDevice runs mount command on the node to mount the volume
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
    mounter := attacher.host.GetMounter()
    notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
    if err != nil {
        if os.IsNotExist(err) {
            if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
                return err
            }
            notMnt = true
        } else {
            return err
        }
    }

    volumeSource, err := getVolumeSource(spec)
    if err != nil {
        return err
    }

    options := []string{}
    if spec.ReadOnly {
        options = append(options, "ro")
    }
    if notMnt {
        diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
        err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, options)
        if err != nil {
            os.Remove(deviceMountPath)
            return err
        }
    }
    return nil
}

type azureDiskDetacher struct {
    mounter       mount.Interface
    azureProvider azureCloudProvider
}

var _ volume.Detacher = &azureDiskDetacher{}

// NewDetacher initializes a volume Detacher
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
    azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        return nil, err
    }

    return &azureDiskDetacher{
        mounter:       plugin.host.GetMounter(),
        azureProvider: azure,
    }, nil
}

// Detach detaches disk from Azure VM.
func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
    if diskName == "" {
        return fmt.Errorf("invalid disk to detach: %q", diskName)
    }
    instanceid, err := detacher.azureProvider.InstanceID(nodeName)
    if err != nil {
        glog.Warningf("no instance id for node %q, skip detaching", nodeName)
        return nil
    }
    if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
        instanceid = instanceid[(ind + 1):]
    }

    glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
    err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
    if err != nil {
        glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
    }

    return err
}

// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
    volume := path.Base(deviceMountPath)
    if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil {
        glog.Errorf("Error unmounting %q: %v", volume, err)
        return err
    } else {
        return nil
    }
}
381
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go
generated
vendored
Normal file
@@ -0,0 +1,381 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "os"
    "path"

    "github.com/Azure/azure-sdk-for-go/arm/compute"

    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/util/keymutex"
    "k8s.io/kubernetes/pkg/util/mount"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}

type azureDataDiskPlugin struct {
    host        volume.VolumeHost
    volumeLocks keymutex.KeyMutex
}

// Abstract interface to disk operations.
// azure cloud provider should implement it
type azureCloudProvider interface {
    // Attaches the disk to the host machine.
    AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
    // Detaches the disk, identified by disk name or uri, from the host machine.
    DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
    // Check if a list of volumes are attached to the node with the specified NodeName
    DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
    // Get the LUN number of the disk that is attached to the host
    GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
    // Get the next available LUN number to attach a new VHD
    GetNextDiskLun(nodeName types.NodeName) (int32, error)
    // InstanceID returns the cloud provider ID of the specified instance.
    InstanceID(nodeName types.NodeName) (string, error)
    // Create a VHD blob
    CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error)
    // Delete a VHD blob
    DeleteVolume(name, uri string) error
}

var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}

const (
    azureDataDiskPluginName = "kubernetes.io/azure-disk"
)

func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    plugin.volumeLocks = keymutex.NewKeyMutex()
    return nil
}

func (plugin *azureDataDiskPlugin) GetPluginName() string {
    return azureDataDiskPluginName
}

func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return volumeSource.DiskName, nil
}

func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
        (spec.Volume != nil && spec.Volume.AzureDisk != nil)
}

func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
    return false
}

func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
    return []v1.PersistentVolumeAccessMode{
        v1.ReadWriteOnce,
    }
}

func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter())
}

func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
    // azures used directly in a pod have a ReadOnly flag set by the pod author.
    // azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
    azure, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }
    fsType := "ext4"
    if azure.FSType != nil {
        fsType = *azure.FSType
    }
    cachingMode := v1.AzureDataDiskCachingNone
    if azure.CachingMode != nil {
        cachingMode = *azure.CachingMode
    }
    readOnly := false
    if azure.ReadOnly != nil {
        readOnly = *azure.ReadOnly
    }
    diskName := azure.DiskName
    diskUri := azure.DataDiskURI
    return &azureDiskMounter{
        azureDisk: &azureDisk{
            podUID:      podUID,
            volName:     spec.Name(),
            diskName:    diskName,
            diskUri:     diskUri,
            cachingMode: cachingMode,
            mounter:     mounter,
            plugin:      plugin,
        },
        fsType:      fsType,
        readOnly:    readOnly,
        diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}

func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}

func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
    return &azureDiskUnmounter{
        &azureDisk{
            podUID:  podUID,
            volName: volName,
            mounter: mounter,
            plugin:  plugin,
        },
    }, nil
}

func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
    mounter := plugin.host.GetMounter()
    pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
    sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
    if err != nil {
        return nil, err
    }
    azVolume := &v1.Volume{
        Name: volName,
        VolumeSource: v1.VolumeSource{
            AzureDisk: &v1.AzureDiskVolumeSource{
                DiskName: sourceName,
            },
        },
    }
    return volume.NewSpecFromVolume(azVolume), nil
}

func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
    mounter := plugin.host.GetMounter()
    return mount.GetMountRefs(mounter, deviceMountPath)
}

type azureDisk struct {
    volName     string
    podUID      types.UID
    diskName    string
    diskUri     string
    cachingMode v1.AzureDataDiskCachingMode
    mounter     mount.Interface
    plugin      *azureDataDiskPlugin
    volume.MetricsNil
}

type azureDiskMounter struct {
    *azureDisk
    // Filesystem type, optional.
    fsType string
    // Specifies whether the disk will be attached as read-only.
    readOnly bool
    // diskMounter provides the interface that is used to mount the actual block device.
    diskMounter *mount.SafeFormatAndMount
}

var _ volume.Mounter = &azureDiskMounter{}

func (b *azureDiskMounter) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        b.readOnly,
        Managed:         !b.readOnly,
        SupportsSELinux: true,
    }
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *azureDiskMounter) CanMount() error {
    return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
    b.plugin.volumeLocks.LockKey(b.diskName)
    defer b.plugin.volumeLocks.UnlockKey(b.diskName)

    // TODO: handle failed mounts here.
    notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
    glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err)
    if err != nil && !os.IsNotExist(err) {
        glog.Errorf("IsLikelyNotMountPoint failed: %v", err)
        return err
    }
    if !notMnt {
        glog.V(4).Infof("%s is a mount point", dir)
        return nil
    }

    globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName)

    if err := os.MkdirAll(dir, 0750); err != nil {
        glog.V(4).Infof("Could not create directory %s: %v", dir, err)
        return err
    }

    // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
    options := []string{"bind"}
    if b.readOnly {
        options = append(options, "ro")
    }
    err = b.mounter.Mount(globalPDPath, dir, "", options)
    if err != nil {
        notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
        if mntErr != nil {
            glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
            return err
        }
        if !notMnt {
            if mntErr = b.mounter.Unmount(dir); mntErr != nil {
                glog.Errorf("Failed to unmount: %v", mntErr)
                return err
            }
            notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
            if mntErr != nil {
                glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
                return err
            }
            if !notMnt {
                // This is very odd, we don't expect it. We'll try again next sync loop.
                glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
                return err
            }
        }
        os.Remove(dir)
        return err
    }

    if !b.readOnly {
        volume.SetVolumeOwnership(b, fsGroup)
    }
    glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir)
    return nil
}

func makeGlobalPDPath(host volume.VolumeHost, volume string) string {
    return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume)
}

func (azure *azureDisk) GetPath() string {
    name := azureDataDiskPluginName
    return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName)
}

type azureDiskUnmounter struct {
    *azureDisk
}

var _ volume.Unmounter = &azureDiskUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDown() error {
    return c.TearDownAt(c.GetPath())
}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDownAt(dir string) error {
    notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
    if err != nil {
        glog.Errorf("Error checking if mountpoint %s: %v", dir, err)
        return err
    }
    if notMnt {
        glog.V(2).Info("Not mountpoint, deleting")
        return os.Remove(dir)
    }
    // lock the volume (and thus wait for any concurrent SetUpAt to finish)
    c.plugin.volumeLocks.LockKey(c.diskName)
    defer c.plugin.volumeLocks.UnlockKey(c.diskName)
    refs, err := mount.GetMountRefs(c.mounter, dir)
    if err != nil {
        glog.Errorf("Error getting mountrefs for %s: %v", dir, err)
        return err
    }
    if len(refs) == 0 {
        glog.Errorf("Did not find pod-mount for %s during tear down", dir)
        return fmt.Errorf("%s is not mounted", dir)
    }
    c.diskName = path.Base(refs[0])
    glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir)

    // Unmount the bind-mount inside this pod
    if err := c.mounter.Unmount(dir); err != nil {
        glog.Errorf("Error unmounting dir %s %v", dir, err)
        return err
    }
    notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
    if mntErr != nil {
        glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
        return err
    }
    if notMnt {
        if err := os.Remove(dir); err != nil {
            glog.Errorf("Error removing mountpoint %s %v", dir, err)
            return err
        }
    }
    return nil
}

func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
    if spec.Volume != nil && spec.Volume.AzureDisk != nil {
        return spec.Volume.AzureDisk, nil
    }
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
        return spec.PersistentVolume.Spec.AzureDisk, nil
    }

    return nil, fmt.Errorf("Spec does not reference an Azure disk volume type")
}

// Return cloud provider
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
    azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
    if !ok || azureCloudProvider == nil {
        return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
    }

    return azureCloudProvider, nil
}
177
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go
generated
vendored
Normal file
@@ -0,0 +1,177 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "os"
    "path"
    "testing"

    "github.com/Azure/azure-sdk-for-go/arm/compute"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    utiltesting "k8s.io/kubernetes/pkg/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("azure_dd")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != azureDataDiskPluginName {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }

    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
}

const (
    fakeDiskName = "foo"
    fakeDiskUri  = "https://azure/vhds/bar.vhd"
    fakeLun      = 2
)

type fakeAzureProvider struct {
}

func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error {
    if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun {
        return fmt.Errorf("wrong disk")
    }
    return nil

}

func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error {
    if diskName != fakeDiskName || diskUri != fakeDiskUri {
        return fmt.Errorf("wrong disk")
    }
    return nil
}
func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) {
    return int32(fakeLun), nil
}

func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) {
    return fakeLun, nil
}
func (fake *fakeAzureProvider) InstanceID(name string) (string, error) {
    return "localhost", nil
}

func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
    return "", "", 0, fmt.Errorf("not implemented")
}

func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error {
    return fmt.Errorf("not implemented")
}

func TestPlugin(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("azure_ddTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    fs := "ext4"
    ro := false
    caching := v1.AzureDataDiskCachingNone
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            AzureDisk: &v1.AzureDiskVolumeSource{
                DiskName:    fakeDiskName,
                DataDiskURI: fakeDiskUri,
                FSType:      &fs,
                CachingMode: &caching,
                ReadOnly:    &ro,
            },
        },
    }
    mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }
    volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1")
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s, should be %s", path, volPath)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("SetUp() failed: %v", err)
    }
}
162
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "strings"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
)

var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}

type azureDiskDeleter struct {
    *azureDisk
    azureProvider azureCloudProvider
}

func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
    azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        glog.V(4).Infof("failed to get azure provider")
        return nil, err
    }

    return plugin.newDeleterInternal(spec, azure)
}

func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) {
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil {
        return nil, fmt.Errorf("invalid PV spec")
    }
    diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName
    diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI
    return &azureDiskDeleter{
        azureDisk: &azureDisk{
            volName:  spec.Name(),
            diskName: diskName,
            diskUri:  diskUri,
            plugin:   plugin,
        },
        azureProvider: azure,
    }, nil
}

func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
    azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        glog.V(4).Infof("failed to get azure provider")
        return nil, err
    }
    if len(options.PVC.Spec.AccessModes) == 0 {
        options.PVC.Spec.AccessModes = plugin.GetAccessModes()
    }
    return plugin.newProvisionerInternal(options, azure)
}

func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
    return &azureDiskProvisioner{
        azureDisk: &azureDisk{
            plugin: plugin,
        },
        azureProvider: azure,
        options:       options,
    }, nil
}

var _ volume.Deleter = &azureDiskDeleter{}

func (d *azureDiskDeleter) GetPath() string {
    name := azureDataDiskPluginName
    return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName)
}

func (d *azureDiskDeleter) Delete() error {
    glog.V(4).Infof("deleting volume %s", d.diskUri)
    return d.azureProvider.DeleteVolume(d.diskName, d.diskUri)
}

type azureDiskProvisioner struct {
    *azureDisk
    azureProvider azureCloudProvider
    options       volume.VolumeOptions
}

var _ volume.Provisioner = &azureDiskProvisioner{}

func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
    var sku, location, account string

    name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 255)
    capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    requestBytes := capacity.Value()
    requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

    // Apply ProvisionerParameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    for k, v := range a.options.Parameters {
        switch strings.ToLower(k) {
        case "skuname":
            sku = v
        case "location":
            location = v
        case "storageaccount":
            account = v
        default:
            return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
        }
    }
    // TODO: implement c.options.ProvisionerSelector parsing
    if a.options.PVC.Spec.Selector != nil {
        return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
    }

    diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
    if err != nil {
        return nil, err
    }

    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name:   a.options.PVName,
            Labels: map[string]string{},
            Annotations: map[string]string{
                "kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
            },
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   a.options.PVC.Spec.AccessModes,
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
            },
            PersistentVolumeSource: v1.PersistentVolumeSource{
                AzureDisk: &v1.AzureDiskVolumeSource{
                    DiskName:    diskName,
                    DataDiskURI: diskUri,
                },
            },
        },
    }
    return pv, nil
}
117
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "io/ioutil"
    "os"
    "path"
    "regexp"
    "strconv"
    "strings"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/util/exec"
)

type ioHandler interface {
    ReadDir(dirname string) ([]os.FileInfo, error)
    WriteFile(filename string, data []byte, perm os.FileMode) error
}

type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
    return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
    return ioutil.WriteFile(filename, data, perm)
}

// given a LUN find the VHD device path like /dev/sdb
// VHD disks under sysfs are like /sys/bus/scsi/devices/3:0:1:0
// return empty string if no disk is found
func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
    var err error
    sys_path := "/sys/bus/scsi/devices"
    if dirs, err := io.ReadDir(sys_path); err == nil {
        for _, f := range dirs {
            name := f.Name()
            // look for path like /sys/bus/scsi/devices/3:0:1:0
            arr := strings.Split(name, ":")
            if len(arr) < 4 {
                continue
            }
            target, err := strconv.Atoi(arr[0])
            if err != nil {
                glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
                continue
            }

            // as observed, targets 0-3 are used by OS disks. Skip them
            if target > 3 {
                l, err := strconv.Atoi(arr[3])
                if err != nil {
                    // unknown path format, continue to read the next one
                    glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err)
                    continue
                }
                if lun == l {
                    // find the matching LUN
                    // read vendor and model to ensure it is a VHD disk
                    vendor := path.Join(sys_path, name, "vendor")
                    model := path.Join(sys_path, name, "model")
                    out, err := exe.Command("cat", vendor, model).CombinedOutput()
                    if err != nil {
                        glog.Errorf("failed to cat device vendor and model, err: %v", err)
                        continue
                    }
                    matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out)))
                    if err != nil || !matched {
                        glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err)
                        continue
                    }
                    // find it!
                    dir := path.Join(sys_path, name, "block")
                    dev, err := io.ReadDir(dir)
                    if err != nil {
                        glog.Errorf("failed to read %s", dir)
                    } else {
                        return "/dev/" + dev[0].Name(), nil
                    }
                }
            }
        }
    }
    return "", err
}

// rescan scsi bus
func scsiHostRescan(io ioHandler) {
    scsi_path := "/sys/class/scsi_host/"
    if dirs, err := io.ReadDir(scsi_path); err == nil {
        for _, f := range dirs {
            name := scsi_path + f.Name() + "/scan"
            data := []byte("- - -")
            if err = io.WriteFile(name, data, 0666); err != nil {
                glog.Errorf("failed to rescan scsi host %s", name)
            }
        }
    } else {
        glog.Errorf("failed to read %s, err %v", scsi_path, err)
    }
}
116
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util_test.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure_dd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
)
|
||||
|
||||
type fakeFileInfo struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (fi *fakeFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
|
||||
func (fi *fakeFileInfo) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (fi *fakeFileInfo) Mode() os.FileMode {
|
||||
return 777
|
||||
}
|
||||
|
||||
func (fi *fakeFileInfo) ModTime() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
func (fi *fakeFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (fi *fakeFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
lun = 1
|
||||
lunStr = "1"
|
||||
diskPath = "4:0:0:" + lunStr
|
||||
devName = "sda"
|
||||
)
|
||||
|
||||
type fakeIOHandler struct{}
|
||||
|
||||
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
|
||||
switch dirname {
|
||||
case "/sys/bus/scsi/devices":
|
||||
f1 := &fakeFileInfo{
|
||||
name: "3:0:0:1",
|
||||
}
|
||||
f2 := &fakeFileInfo{
|
||||
name: "4:0:0:0",
|
||||
}
|
||||
f3 := &fakeFileInfo{
|
||||
name: diskPath,
|
||||
}
|
||||
f4 := &fakeFileInfo{
|
||||
name: "host1",
|
||||
}
|
||||
f5 := &fakeFileInfo{
|
||||
name: "target2:0:0",
|
||||
}
|
||||
return []os.FileInfo{f1, f2, f3, f4, f5}, nil
|
||||
case "/sys/bus/scsi/devices/" + diskPath + "/block":
|
||||
n := &fakeFileInfo{
|
||||
name: devName,
|
||||
}
|
||||
return []os.FileInfo{n}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("bad dir")
|
||||
}
|
||||
|
||||
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestIoHandler(t *testing.T) {
|
||||
var fcmd exec.FakeCmd
|
||||
fcmd = exec.FakeCmd{
|
||||
CombinedOutputScript: []exec.FakeCombinedOutputAction{
|
||||
// cat
|
||||
func() ([]byte, error) {
|
||||
return []byte("Msft \nVirtual Disk \n"), nil
|
||||
},
|
||||
},
|
||||
}
|
||||
fake := exec.FakeExec{
|
||||
CommandScript: []exec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
},
|
||||
}
|
||||
disk, err := findDiskByLun(lun, &fakeIOHandler{}, &fake)
|
||||
// if no disk matches lun, exit
|
||||
if disk != "/dev/"+devName || err != nil {
|
||||
t.Errorf("no data disk disk found: disk %v err %v", disk, err)
|
||||
}
|
||||
}
|
||||
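The fake directory listing above mixes real "host:channel:target:lun" entries with names like "host1" and "target2:0:0" that findDiskByLun must skip. A sketch of the LUN extraction the test implies, with illustrative helper names; the vendored parser may differ:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lunOf extracts the LUN field from a "host:channel:target:lun" sysfs entry
// name; entries of any other shape report ok=false and are skipped.
func lunOf(entry string) (lun int, ok bool) {
	parts := strings.Split(entry, ":")
	if len(parts) != 4 {
		return 0, false
	}
	l, err := strconv.Atoi(parts[3])
	if err != nil {
		return 0, false
	}
	return l, true
}

func main() {
	for _, e := range []string{"3:0:0:1", "4:0:0:0", "host1", "target2:0:0"} {
		l, ok := lunOf(e)
		fmt.Println(e, l, ok)
	}
}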
44
vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD
generated
vendored
Normal file
@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "azure_file.go",
        "azure_util.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["azure_file_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS
generated
vendored
Normal file
@ -0,0 +1,2 @@
assignees:
- rootfs
280
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go
generated
vendored
Normal file
@ -0,0 +1,280 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"
	"os"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"

	"github.com/golang/glog"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&azureFilePlugin{nil}}
}

type azureFilePlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &azureFilePlugin{}
var _ volume.PersistentVolumePlugin = &azureFilePlugin{}

const (
	azureFilePluginName = "kubernetes.io/azure-file"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(azureFilePluginName), volName)
}

func (plugin *azureFilePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *azureFilePlugin) GetPluginName() string {
	return azureFilePluginName
}

func (plugin *azureFilePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.ShareName, nil
}

func (plugin *azureFilePlugin) CanSupport(spec *volume.Spec) bool {
	//TODO: check if mount.cifs is there
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureFile != nil) ||
		(spec.Volume != nil && spec.Volume.AzureFile != nil)
}

func (plugin *azureFilePlugin) RequiresRemount() bool {
	return false
}

func (plugin *azureFilePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
}

func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter())
}

func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
	source, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	return &azureFileMounter{
		azureFile: &azureFile{
			volName:         spec.Name(),
			mounter:         mounter,
			pod:             pod,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host)),
		},
		util:       util,
		secretName: source.SecretName,
		shareName:  source.ShareName,
		readOnly:   readOnly,
	}, nil
}

func (plugin *azureFilePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}

func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
	return &azureFileUnmounter{&azureFile{
		volName:         volName,
		mounter:         mounter,
		pod:             &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
	}}, nil
}

func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	azureVolume := &v1.Volume{
		Name: volName,
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: volName,
				ShareName:  volName,
			},
		},
	}
	return volume.NewSpecFromVolume(azureVolume), nil
}

// azureFile volumes represent a mount of an Azure File share.
type azureFile struct {
	volName string
	pod     *v1.Pod
	mounter mount.Interface
	plugin  *azureFilePlugin
	volume.MetricsProvider
}

func (azureFileVolume *azureFile) GetPath() string {
	return getPath(azureFileVolume.pod.UID, azureFileVolume.volName, azureFileVolume.plugin.host)
}

type azureFileMounter struct {
	*azureFile
	util       azureUtil
	secretName string
	shareName  string
	readOnly   bool
}

var _ volume.Mounter = &azureFileMounter{}

func (b *azureFileMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: false,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If they are not, it returns an error.
func (b *azureFileMounter) CanMount() error {
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureFileMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !notMnt {
		return nil
	}
	var accountKey, accountName string
	if accountName, accountKey, err = b.util.GetAzureCredentials(b.plugin.host, b.pod.Namespace, b.secretName, b.shareName); err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	source := fmt.Sprintf("//%s.file.core.windows.net/%s", accountName, b.shareName)
	// parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/
	options := []string{fmt.Sprintf("vers=3.0,username=%s,password=%s,dir_mode=0777,file_mode=0777", accountName, accountKey)}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = b.mounter.Mount(source, dir, "cifs", options)
	if err != nil {
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("Failed to unmount: %v", mntErr)
				return err
			}
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		return err
	}
	return nil
}

var _ volume.Unmounter = &azureFileUnmounter{}

type azureFileUnmounter struct {
	*azureFile
}

func (c *azureFileUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *azureFileUnmounter) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking IsLikelyNotMountPoint: %v", err)
		return err
	}
	if notMnt {
		return os.Remove(dir)
	}

	if err := c.mounter.Unmount(dir); err != nil {
		glog.Errorf("Unmounting failed: %v", err)
		return err
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}

func getVolumeSource(
	spec *volume.Spec) (*v1.AzureFileVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.AzureFile != nil {
		return spec.Volume.AzureFile, spec.Volume.AzureFile.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AzureFile != nil {
		return spec.PersistentVolume.Spec.AzureFile, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference an AzureFile volume type")
}
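SetUpAt above joins every CIFS parameter into one comma-separated option string and derives the mount source from the storage account and share names. A sketch of the values it ends up handing to the mounter, with placeholder credentials (real values come from the pod's secret):

package main

import "fmt"

func main() {
	accountName, accountKey, share := "myaccount", "examplekey", "myshare"
	// Same source and option formats SetUpAt builds:
	source := fmt.Sprintf("//%s.file.core.windows.net/%s", accountName, share)
	options := fmt.Sprintf("vers=3.0,username=%s,password=%s,dir_mode=0777,file_mode=0777",
		accountName, accountKey)
	// Roughly the shell equivalent of the resulting mount:
	//   mount -t cifs -o <options>[,ro] <source> <pod volume dir>
	fmt.Println(source)
	fmt.Println(options)
}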
240
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file_test.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"io/ioutil"
	"os"
	"path"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/azure-file" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
		t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
	}
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

func TestPlugin(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: "secret",
				ShareName:  "share",
			},
		},
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}
	volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
	path := mounter.GetPath()
	if path != volPath {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureFile: &v1.AzureFileVolumeSource{},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(azureFilePluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

type fakeAzureSvc struct{}

func (s *fakeAzureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName, shareName string) (string, string, error) {
	return "name", "key", nil
}

func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: "secret",
				ShareName:  "share",
			},
		},
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-assert to Unmounter")
	}

	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-assert to Mounter")
	}
}
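Because newMounterInternal accepts the azureUtil interface directly, the test above can slot in fakeAzureSvc instead of reading a real secret. The same seam admits a recording fake when a test needs to assert how often credentials were fetched; an illustrative variant, not part of the vendored file:

// recordingAzureSvc counts credential lookups; otherwise it behaves like
// fakeAzureSvc above.
type recordingAzureSvc struct {
	calls int
}

func (s *recordingAzureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName, shareName string) (string, string, error) {
	s.calls++
	return "name", "key", nil
}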
55
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume"
)

// Abstract interface to azure file operations.
type azureUtil interface {
	GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName, shareName string) (string, string, error)
}

type azureSvc struct{}

func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName, shareName string) (string, string, error) {
	var accountKey, accountName string
	kubeClient := host.GetKubeClient()
	if kubeClient == nil {
		return "", "", fmt.Errorf("Cannot get kube client")
	}

	keys, err := kubeClient.Core().Secrets(nameSpace).Get(secretName)
	if err != nil {
		return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName)
	}
	for name, data := range keys.Data {
		if name == "azurestorageaccountname" {
			accountName = string(data)
		}
		if name == "azurestorageaccountkey" {
			accountKey = string(data)
		}
	}
	if accountName == "" || accountKey == "" {
		return "", "", fmt.Errorf("Invalid %v/%v, couldn't extract azurestorageaccountname or azurestorageaccountkey", nameSpace, secretName)
	}
	return accountName, accountKey, nil
}
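GetAzureCredentials above only cares about two keys in the secret's Data map. A sketch of a matching Secret object, using the same v1.ObjectMeta shape as the tests in this package; name, namespace, and values are illustrative:

// Sketch: the Secret shape GetAzureCredentials reads.
secret := &v1.Secret{
	ObjectMeta: v1.ObjectMeta{Name: "azure-secret", Namespace: "default"},
	Data: map[string][]byte{
		"azurestorageaccountname": []byte("myaccount"),
		"azurestorageaccountkey":  []byte("examplekey"),
	},
}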
19
vendor/k8s.io/kubernetes/pkg/volume/azure_file/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package azure_file contains the internal representation of
// Azure File Service Volume
package azure_file // import "k8s.io/kubernetes/pkg/volume/azure_file"
43
vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD
generated
vendored
Normal file
@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cephfs.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["cephfs_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS
generated
vendored
Normal file
@ -0,0 +1,2 @@
assignees:
- rootfs
324
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"fmt"
	"os"
	"strings"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&cephfsPlugin{nil}}
}

type cephfsPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &cephfsPlugin{}

const (
	cephfsPluginName = "kubernetes.io/cephfs"
)

func (plugin *cephfsPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *cephfsPlugin) GetPluginName() string {
	return cephfsPluginName
}

func (plugin *cephfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%v", volumeSource.Monitors), nil
}

func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil)
}

func (plugin *cephfsPlugin) RequiresRemount() bool {
	return false
}

func (plugin *cephfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
}

func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	cephvs, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	secret := ""
	if cephvs.SecretRef != nil {
		kubeClient := plugin.host.GetKubeClient()
		if kubeClient == nil {
			return nil, fmt.Errorf("Cannot get kube client")
		}

		secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(cephvs.SecretRef.Name)
		if err != nil {
			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, cephvs.SecretRef, err)
			return nil, err
		}
		for name, data := range secretName.Data {
			secret = string(data)
			glog.V(1).Infof("found ceph secret info: %s", name)
		}
	}
	return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(), secret)
}

func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
	cephvs, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	id := cephvs.User
	if id == "" {
		id = "admin"
	}
	path := cephvs.Path
	if path == "" {
		path = "/"
	}
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	secret_file := cephvs.SecretFile
	if secret_file == "" {
		secret_file = "/etc/ceph/" + id + ".secret"
	}

	return &cephfsMounter{
		cephfs: &cephfs{
			podUID:      podUID,
			volName:     spec.Name(),
			mon:         cephvs.Monitors,
			path:        path,
			secret:      secret,
			id:          id,
			secret_file: secret_file,
			readonly:    cephvs.ReadOnly,
			mounter:     mounter,
			plugin:      plugin},
	}, nil
}

func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}

func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
	return &cephfsUnmounter{
		cephfs: &cephfs{
			podUID:  podUID,
			volName: volName,
			mounter: mounter,
			plugin:  plugin},
	}, nil
}

func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	cephfsVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			CephFS: &v1.CephFSVolumeSource{
				Monitors: []string{},
				Path:     volumeName,
			},
		},
	}
	return volume.NewSpecFromVolume(cephfsVolume), nil
}

// CephFS volumes represent a bare host file or directory mount of a CephFS export.
type cephfs struct {
	volName     string
	podUID      types.UID
	mon         []string
	path        string
	id          string
	secret      string
	secret_file string
	readonly    bool
	mounter     mount.Interface
	plugin      *cephfsPlugin
	volume.MetricsNil
}

type cephfsMounter struct {
	*cephfs
}

var _ volume.Mounter = &cephfsMounter{}

func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        cephfsVolume.readonly,
		Managed:         false,
		SupportsSELinux: false,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If they are not, it returns an error.
func (cephfsVolume *cephfsMounter) CanMount() error {
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
	return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !notMnt {
		return nil
	}
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}

	err = cephfsVolume.execMount(dir)
	if err == nil {
		return nil
	}

	// cleanup upon failure
	cephfsVolume.cleanup(dir)
	// return error
	return err
}

type cephfsUnmounter struct {
	*cephfs
}

var _ volume.Unmounter = &cephfsUnmounter{}

// TearDown unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDown() error {
	return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
}

// TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDownAt(dir string) error {
	return cephfsVolume.cleanup(dir)
}

// GetPath returns the path of the pod's volume directory for this plugin.
func (cephfsVolume *cephfs) GetPath() string {
	name := cephfsPluginName
	return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}

func (cephfsVolume *cephfs) cleanup(dir string) error {
	noMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("CephFS: Error checking IsLikelyNotMountPoint: %v", err)
	}
	if noMnt {
		return os.RemoveAll(dir)
	}

	if err := cephfsVolume.mounter.Unmount(dir); err != nil {
		return fmt.Errorf("CephFS: Unmounting failed: %v", err)
	}
	noMnt, mntErr := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		return fmt.Errorf("CephFS: IsMountpoint check failed: %v", mntErr)
	}
	if noMnt {
		if err := os.RemoveAll(dir); err != nil {
			return fmt.Errorf("CephFS: removeAll %s: %v", dir, err)
		}
	}

	return nil
}

func (cephfsVolume *cephfs) execMount(mountpoint string) error {
	// cephfs mount option
	ceph_opt := ""
	// override secretfile if secret is provided
	if cephfsVolume.secret != "" {
		ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
	} else {
		ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
	}
	// build option array
	opt := []string{}
	if cephfsVolume.readonly {
		opt = append(opt, "ro")
	}
	opt = append(opt, ceph_opt)

	// build src like mon1:6789,mon2:6789,mon3:6789:/
	hosts := cephfsVolume.mon
	l := len(hosts)
	// pass all monitors and let ceph randomize and fail over
	i := 0
	src := ""
	for i = 0; i < l-1; i++ {
		src += hosts[i] + ","
	}
	src += hosts[i] + ":" + cephfsVolume.path

	if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
		return fmt.Errorf("CephFS: mount failed: %v", err)
	}

	return nil
}

func getVolumeSource(spec *volume.Spec) (*v1.CephFSVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.CephFS != nil {
		return spec.Volume.CephFS, spec.Volume.CephFS.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.CephFS != nil {
		return spec.PersistentVolume.Spec.CephFS, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference a CephFS volume type")
}
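The hand-rolled loop in execMount that comma-joins the monitors is equivalent to a strings.Join. A sketch (like the original loop it assumes at least one monitor, since an empty list would otherwise panic; strings is already imported above):

// cephMountSource builds "mon1,mon2,mon3:/path" from a monitor list.
func cephMountSource(hosts []string, path string) string {
	return strings.Join(hosts, ",") + ":" + path
}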
137
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs_test.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"os"
	"path"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/cephfs" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{CephFS: &v1.CephFSVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
}

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			CephFS: &v1.CephFSVolumeSource{
				Monitors:   []string{"a", "b"},
				User:       "user",
				SecretRef:  nil,
				SecretFile: "/etc/ceph/user.secret",
			},
		},
	}

	mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}
	volumePath := mounter.GetPath()
	volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
	if volumePath != volpath {
		t.Errorf("Got unexpected path: %s", volumePath)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

func TestConstructVolumeSpec(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("Can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("can't find cephfs plugin by name")
	}

	cephfsSpec, err := plug.(*cephfsPlugin).ConstructVolumeSpec("cephfsVolume", "/cephfsVolume/")
	if err != nil {
		t.Errorf("ConstructVolumeSpec() failed: %v", err)
	}

	if cephfsSpec.Name() != "cephfsVolume" {
		t.Errorf("Got wrong cephfs spec name, got: %s", cephfsSpec.Name())
	}
}
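These tests only assert on the filesystem side effects of SetUp and TearDown. Assuming this release's mount.FakeMounter also records a Log of FakeAction entries, a test could assert the mount call itself; an illustrative fragment, not part of the vendored file:

fake := &mount.FakeMounter{}
// ... run SetUp through a mounter built with `fake` ...
if len(fake.Log) != 1 || fake.Log[0].Action != mount.FakeActionMount {
	t.Errorf("unexpected mounter log: %#v", fake.Log)
}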
19
vendor/k8s.io/kubernetes/pkg/volume/cephfs/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"
57
vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD
generated
vendored
Normal file
@ -0,0 +1,57 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "cinder.go",
        "cinder_util.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/openstack:go_default_library",
        "//pkg/cloudprovider/providers/rackspace:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "cinder_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)
3
vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS
generated
vendored
Normal file
@ -0,0 +1,3 @@
maintainers:
- jsafrane
- spothanis
286
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go
generated
vendored
Normal file
@ -0,0 +1,286 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type cinderDiskAttacher struct {
	host           volume.VolumeHost
	cinderProvider CinderProvider
}

var _ volume.Attacher = &cinderDiskAttacher{}

var _ volume.AttachableVolumePlugin = &cinderPlugin{}

const (
	checkSleepDuration = time.Second
)

func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}
	return &cinderDiskAttacher{
		host:           plugin.host,
		cinderProvider: cinder,
	}, nil
}

func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter()
	return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := volumeSource.VolumeID

	instances, res := attacher.cinderProvider.Instances()
	if !res {
		return "", fmt.Errorf("failed to list openstack instances")
	}
	instanceid, err := instances.InstanceID(nodeName)
	if err != nil {
		return "", err
	}
	if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
		instanceid = instanceid[(ind + 1):]
	}
	attached, err := attacher.cinderProvider.DiskIsAttached(volumeID, instanceid)
	if err != nil {
		// Log error and continue with attach
		glog.Warningf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
			volumeID, instanceid, err)
	}

	if err == nil && attached {
		// Volume is already attached to node.
		glog.Infof("Attach operation is successful. volume %q is already attached to node %q.", volumeID, instanceid)
	} else {
		_, err = attacher.cinderProvider.AttachDisk(instanceid, volumeID)
		if err == nil {
			glog.Infof("Attach operation successful: volume %q attached to node %q.", volumeID, instanceid)
		} else {
			glog.Infof("Attach volume %q to instance %q failed with %v", volumeID, instanceid, err)
			return "", err
		}
	}

	devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceid, volumeID)
	if err != nil {
		glog.Infof("GetAttachmentDiskPath for volume %q on instance %q failed with %v", volumeID, instanceid, err)
		return "", err
	}

	return devicePath, nil
}

func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumeSpecMap := make(map[string]*volume.Spec)
	volumeIDList := []string{}
	for _, spec := range specs {
		volumeSource, _, err := getVolumeSource(spec)
		if err != nil {
			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
			continue
		}

		volumeIDList = append(volumeIDList, volumeSource.VolumeID)
		volumesAttachedCheck[spec] = true
		volumeSpecMap[volumeSource.VolumeID] = spec
	}
	attachedResult, err := attacher.cinderProvider.DisksAreAttached(volumeIDList, string(nodeName))
	if err != nil {
		// Log error and continue with attach
		glog.Errorf(
			"Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
			volumeIDList, nodeName, err)
		return volumesAttachedCheck, err
	}

	for volumeID, attached := range attachedResult {
		if !attached {
			spec := volumeSpecMap[volumeID]
			volumesAttachedCheck[spec] = false
			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}

func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
	// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := volumeSource.VolumeID

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
	}

	ticker := time.NewTicker(checkSleepDuration)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		probeAttachedVolume()
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
			probeAttachedVolume()
			if !attacher.cinderProvider.ShouldTrustDevicePath() {
				// Using the Cinder volume ID, find the real device path (See Issue #33128)
				devicePath = attacher.cinderProvider.GetDevicePath(volumeID)
			}
			exists, err := volumeutil.PathExists(devicePath)
			if exists && err == nil {
				glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
				return devicePath, nil
			} else {
				// Log an error, and continue checking periodically
				glog.Errorf("Error: could not find attached Cinder disk %q: %v", volumeID, err)
			}
		case <-timer.C:
			return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
		}
	}
}

func (attacher *cinderDiskAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
}

// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter()
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}

type cinderDiskDetacher struct {
	mounter        mount.Interface
	cinderProvider CinderProvider
}

var _ volume.Detacher = &cinderDiskDetacher{}

func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}
	return &cinderDiskDetacher{
		mounter:        plugin.host.GetMounter(),
		cinderProvider: cinder,
	}, nil
}

func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
	volumeID := path.Base(deviceMountPath)
	instances, res := detacher.cinderProvider.Instances()
	if !res {
		return fmt.Errorf("failed to list openstack instances")
	}
	instanceid, err := instances.InstanceID(nodeName)
	if err != nil {
		return err
	}
	if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
		instanceid = instanceid[(ind + 1):]
	}

	attached, err := detacher.cinderProvider.DiskIsAttached(volumeID, instanceid)
	if err != nil {
		// Log error and continue with detach
		glog.Errorf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
			volumeID, nodeName, err)
	}

	if err == nil && !attached {
		// Volume is already detached from node.
		glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
		return nil
	}

	if err = detacher.cinderProvider.DetachDisk(instanceid, volumeID); err != nil {
		glog.Errorf("Error detaching volume %q: %v", volumeID, err)
		return err
	}
	glog.Infof("detached volume %q from instance %q", volumeID, instanceid)
	return nil
}

func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
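WaitForAttach above paces its device checks with a ticker and bounds the whole wait with a timer. The generic shape of that pattern, extracted as a sketch with illustrative names:

package main

import (
	"fmt"
	"time"
)

// waitFor polls check() every interval until it succeeds or timeout elapses.
func waitFor(check func() bool, interval, timeout time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case <-ticker.C:
			if check() {
				return nil
			}
		case <-timer.C:
			return fmt.Errorf("timed out after %v", timeout)
		}
	}
}

func main() {
	start := time.Now()
	err := waitFor(func() bool { return time.Since(start) > time.Second }, 200*time.Millisecond, 5*time.Second)
	fmt.Println(err) // <nil>
}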
459
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go
generated
vendored
Normal file
459
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,459 @@
|
|||
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
    "errors"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/types"
)

func TestGetDeviceName_Volume(t *testing.T) {
    plugin := newPlugin()
    name := "my-cinder-volume"
    spec := createVolSpec(name, false)

    deviceName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetDeviceName error: %v", err)
    }
    if deviceName != name {
        t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
    }
}

func TestGetDeviceName_PersistentVolume(t *testing.T) {
    plugin := newPlugin()
    name := "my-cinder-pv"
    spec := createPVSpec(name, true)

    deviceName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetDeviceName error: %v", err)
    }
    if deviceName != name {
        t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
    }
}

// One testcase for the TestAttachDetach table test below
type testcase struct {
    name string
    // Expected calls on the fake Cinder provider:
    attach         attachCall
    detach         detachCall
    diskIsAttached diskIsAttachedCall
    diskPath       diskPathCall
    t              *testing.T

    instanceID string
    // Actual test to run
    test func(test *testcase) (string, error)
    // Expected return of the test
    expectedDevice string
    expectedError  error
}

func TestAttachDetach(t *testing.T) {
    diskName := "disk"
    instanceID := "instance"
    nodeName := types.NodeName(instanceID)
    readOnly := false
    spec := createVolSpec(diskName, readOnly)
    attachError := errors.New("Fake attach error")
    detachError := errors.New("Fake detach error")
    diskCheckError := errors.New("Fake DiskIsAttached error")
    diskPathError := errors.New("Fake GetAttachmentDiskPath error")
    tests := []testcase{
        // Successful Attach call
        {
            name:           "Attach_Positive",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
            attach:         attachCall{diskName, instanceID, "", nil},
            diskPath:       diskPathCall{diskName, instanceID, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedDevice: "/dev/sda",
        },

        // Disk is already attached
        {
            name:           "Attach_Positive_AlreadyAttached",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
            diskPath:       diskPathCall{diskName, instanceID, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedDevice: "/dev/sda",
        },

        // DiskIsAttached fails and Attach succeeds
        {
            name:           "Attach_Positive_CheckFails",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
            attach:         attachCall{diskName, instanceID, "", nil},
            diskPath:       diskPathCall{diskName, instanceID, "/dev/sda", nil},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedDevice: "/dev/sda",
        },

        // Attach call fails
        {
            name:           "Attach_Negative",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
            attach:         attachCall{diskName, instanceID, "/dev/sda", attachError},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedError: attachError,
        },

        // GetAttachmentDiskPath call fails
        {
            name:           "Attach_Negative_DiskPathFails",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
            attach:         attachCall{diskName, instanceID, "", nil},
            diskPath:       diskPathCall{diskName, instanceID, "", diskPathError},
            test: func(testcase *testcase) (string, error) {
                attacher := newAttacher(testcase)
                return attacher.Attach(spec, nodeName)
            },
            expectedError: diskPathError,
        },

        // Detach succeeds
        {
            name:           "Detach_Positive",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
            detach:         detachCall{diskName, instanceID, nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                return "", detacher.Detach(diskName, nodeName)
            },
        },

        // Disk is already detached
        {
            name:           "Detach_Positive_AlreadyDetached",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                return "", detacher.Detach(diskName, nodeName)
            },
        },

        // Detach succeeds when DiskIsAttached fails
        {
            name:           "Detach_Positive_CheckFails",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
            detach:         detachCall{diskName, instanceID, nil},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                return "", detacher.Detach(diskName, nodeName)
            },
        },

        // Detach fails
        {
            name:           "Detach_Negative",
            instanceID:     instanceID,
            diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
            detach:         detachCall{diskName, instanceID, detachError},
            test: func(testcase *testcase) (string, error) {
                detacher := newDetacher(testcase)
                return "", detacher.Detach(diskName, nodeName)
            },
            expectedError: detachError,
        },
    }

    for _, testcase := range tests {
        testcase.t = t
        device, err := testcase.test(&testcase)
        if err != testcase.expectedError {
            t.Errorf("%s failed: expected err=%v, got %v", testcase.name, testcase.expectedError, err)
        }
        if device != testcase.expectedDevice {
            t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
        }
        t.Logf("Test %q succeeded", testcase.name)
    }
}

// newPlugin creates a new cinderPlugin with a fake volume host; NewAttacher
// and NewDetacher won't work on it.
func newPlugin() *cinderPlugin {
    host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
    plugins := ProbeVolumePlugins()
    plugin := plugins[0]
    plugin.Init(host)
    return plugin.(*cinderPlugin)
}
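// newAttacher and newDetacher wire the fake provider (the testcase itself)
// into the attacher/detacher structs under test.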
func newAttacher(testcase *testcase) *cinderDiskAttacher {
    return &cinderDiskAttacher{
        host:           nil,
        cinderProvider: testcase,
    }
}

func newDetacher(testcase *testcase) *cinderDiskDetacher {
    return &cinderDiskDetacher{
        cinderProvider: testcase,
    }
}

func createVolSpec(name string, readOnly bool) *volume.Spec {
    return &volume.Spec{
        Volume: &v1.Volume{
            VolumeSource: v1.VolumeSource{
                Cinder: &v1.CinderVolumeSource{
                    VolumeID: name,
                    ReadOnly: readOnly,
                },
            },
        },
    }
}

func createPVSpec(name string, readOnly bool) *volume.Spec {
    return &volume.Spec{
        PersistentVolume: &v1.PersistentVolume{
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    Cinder: &v1.CinderVolumeSource{
                        VolumeID: name,
                        ReadOnly: readOnly,
                    },
                },
            },
        },
    }
}

// Fake Cinder provider implementation

type attachCall struct {
    diskName      string
    instanceID    string
    retDeviceName string
    ret           error
}

type detachCall struct {
    devicePath string
    instanceID string
    ret        error
}

type diskIsAttachedCall struct {
    diskName, instanceID string
    isAttached           bool
    ret                  error
}

type diskPathCall struct {
    diskName, instanceID string
    retPath              string
    ret                  error
}
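// The testcase methods below implement the fake CinderProvider; each call is
// checked against the expectations recorded on the testcase.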
func (testcase *testcase) AttachDisk(instanceID string, diskName string) (string, error) {
    expected := &testcase.attach

    if expected.diskName == "" && expected.instanceID == "" {
        // testcase.attach looks uninitialized, test did not expect to call
        // AttachDisk
        testcase.t.Errorf("Unexpected AttachDisk call!")
        return "", errors.New("Unexpected AttachDisk call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
        return "", errors.New("Unexpected AttachDisk call: wrong diskName")
    }

    if expected.instanceID != instanceID {
        testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
        return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
    }

    glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, instanceID, expected.retDeviceName, expected.ret)

    return expected.retDeviceName, expected.ret
}

func (testcase *testcase) DetachDisk(instanceID string, partialDiskId string) error {
    expected := &testcase.detach

    if expected.devicePath == "" && expected.instanceID == "" {
        // testcase.detach looks uninitialized, test did not expect to call
        // DetachDisk
        testcase.t.Errorf("Unexpected DetachDisk call!")
        return errors.New("Unexpected DetachDisk call!")
    }

    if expected.devicePath != partialDiskId {
        testcase.t.Errorf("Unexpected DetachDisk call: expected partialDiskId %s, got %s", expected.devicePath, partialDiskId)
        return errors.New("Unexpected DetachDisk call: wrong diskName")
    }

    if expected.instanceID != instanceID {
        testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
        return errors.New("Unexpected DetachDisk call: wrong instanceID")
    }

    glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", partialDiskId, instanceID, expected.ret)

    return expected.ret
}

func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) {
    expected := &testcase.diskIsAttached

    if expected.diskName == "" && expected.instanceID == "" {
        // testcase.diskIsAttached looks uninitialized, test did not expect to
        // call DiskIsAttached
        testcase.t.Errorf("Unexpected DiskIsAttached call!")
        return false, errors.New("Unexpected DiskIsAttached call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
        return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
    }

    if expected.instanceID != instanceID {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
        return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
    }

    glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret)

    return expected.isAttached, expected.ret
}

func (testcase *testcase) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
    expected := &testcase.diskPath
    if expected.diskName == "" && expected.instanceID == "" {
        // testcase.diskPath looks uninitialized, test did not expect to
        // call GetAttachmentDiskPath
        testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!")
        return "", errors.New("Unexpected GetAttachmentDiskPath call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected diskName %s, got %s", expected.diskName, diskName)
        return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong diskName")
    }

    if expected.instanceID != instanceID {
        testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
        return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID")
    }

    glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", diskName, instanceID, expected.retPath, expected.ret)

    return expected.retPath, expected.ret
}

func (testcase *testcase) ShouldTrustDevicePath() bool {
    return true
}

func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeName string, err error) {
    return "", errors.New("Not implemented")
}

func (testcase *testcase) GetDevicePath(diskId string) string {
    return ""
}

func (testcase *testcase) InstanceID() (string, error) {
    return testcase.instanceID, nil
}

func (testcase *testcase) DeleteVolume(volumeName string) error {
    return errors.New("Not implemented")
}

func (testcase *testcase) GetAutoLabelsForPD(name string) (map[string]string, error) {
    return map[string]string{}, errors.New("Not implemented")
}

func (testcase *testcase) Instances() (cloudprovider.Instances, bool) {
    return &instances{testcase.instanceID}, true
}

func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName string) (map[string]bool, error) {
    return nil, errors.New("Not implemented")
}

// Implementation of fake cloudprovider.Instances
type instances struct {
    instanceID string
}

func (instances *instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
    return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) ExternalID(name types.NodeName) (string, error) {
    return "", errors.New("Not implemented")
}

func (instances *instances) InstanceID(name types.NodeName) (string, error) {
    return instances.instanceID, nil
}

func (instances *instances) InstanceType(name types.NodeName) (string, error) {
    return "", errors.New("Not implemented")
}

func (instances *instances) List(filter string) ([]types.NodeName, error) {
    return []types.NodeName{}, errors.New("Not implemented")
}

func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
    return errors.New("Not implemented")
}

func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
    return "", errors.New("Not implemented")
}
512
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go
generated
vendored
Normal file
File diff suppressed because it is too large
229
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
    "fmt"
    "os"
    "path"
    "testing"
    "time"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    utiltesting "k8s.io/kubernetes/pkg/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("cinderTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != "kubernetes.io/cinder" {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }

    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
}

type fakePDManager struct {
    // How long should AttachDisk/DetachDisk take - we need slower AttachDisk in a test.
    attachDetachDuration time.Duration
}
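// getFakeDeviceName returns the dummy directory that stands in for a block
// device in these tests.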
func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
    return path.Join(host.GetPluginDir(cinderVolumePluginName), "device", pdName)
}

// Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted,
// it mounts it to globalPDPath.
// We create a dummy directory (="device") and bind-mount it to globalPDPath.
func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
    globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
    fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName)
    err := os.MkdirAll(fakeDeviceName, 0750)
    if err != nil {
        return err
    }
    // Attaching a Cinder volume can be slow...
    time.Sleep(fake.attachDetachDuration)

    // The volume is "attached", bind-mount it if it's not mounted yet.
    notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPath)
    if err != nil {
        if os.IsNotExist(err) {
            if err := os.MkdirAll(globalPath, 0750); err != nil {
                return err
            }
            notmnt = true
        } else {
            return err
        }
    }
    if notmnt {
        err = b.mounter.Mount(fakeDeviceName, globalPath, "", []string{"bind"})
        if err != nil {
            return err
        }
    }
    return nil
}

func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
    globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
    fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName)
    // unmount the bind-mount - should be fast
    err := c.mounter.Unmount(globalPath)
    if err != nil {
        return err
    }

    // "Detach" the fake "device"
    err = os.RemoveAll(fakeDeviceName)
    if err != nil {
        return err
    }
    return nil
}

func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
    return "test-volume-name", 1, nil
}

func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
    if cd.pdName != "test-volume-name" {
        return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
    }
    return nil
}

func TestPlugin(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("cinderTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            Cinder: &v1.CinderVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }
    mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }
    volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1")
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("TearDown() failed: %v", err)
    }

    // Test Provisioner
    options := volume.VolumeOptions{
        PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
    }
    provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
    if err != nil {
        t.Errorf("Failed to make a new Provisioner: %v", err)
    }
    persistentSpec, err := provisioner.Provision()
    if err != nil {
        t.Errorf("Provision() failed: %v", err)
    }

    if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
        t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
    }
    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
    size := cap.Value()
    if size != 1024*1024*1024 {
        t.Errorf("Provision() returned unexpected volume size: %v", size)
    }

    // Test Deleter
    volSpec := &volume.Spec{
        PersistentVolume: persistentSpec,
    }
    deleter, err := plug.(*cinderPlugin).newDeleterInternal(volSpec, &fakePDManager{0})
    if err != nil {
        t.Errorf("Failed to make a new Deleter: %v", err)
    }
    if err = deleter.Delete(); err != nil {
        t.Errorf("Deleter() failed: %v", err)
    }
}
187
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
    "errors"
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/volume"
)

type CinderDiskUtil struct{}

// AttachDisk attaches the disk specified by the given cinderVolumeMounter to
// the current kubelet and mounts it to its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
    options := []string{}
    if b.readOnly {
        options = append(options, "ro")
    }
    cloud, err := b.plugin.getCloudProvider()
    if err != nil {
        return err
    }
    instanceid, err := cloud.InstanceID()
    if err != nil {
        return err
    }
    diskid, err := cloud.AttachDisk(instanceid, b.pdName)
    if err != nil {
        return err
    }

    var devicePath string
    numTries := 0
    for {
        devicePath = cloud.GetDevicePath(diskid)
        probeAttachedVolume()

        _, err := os.Stat(devicePath)
        if err == nil {
            break
        }
        if err != nil && !os.IsNotExist(err) {
            return err
        }
        numTries++
        if numTries == 10 {
            return errors.New("Could not attach disk: Timeout after 60s")
        }
        time.Sleep(time.Second * 6)
    }
    notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
    if err != nil {
        if os.IsNotExist(err) {
            if err := os.MkdirAll(globalPDPath, 0750); err != nil {
                return err
            }
            notmnt = true
        } else {
            return err
        }
    }
    if notmnt {
        err = b.blockDeviceMounter.FormatAndMount(devicePath, globalPDPath, b.fsType, options)
        if err != nil {
            os.Remove(globalPDPath)
            return err
        }
        glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
    }
    return nil
}

// DetachDisk unmounts the device and detaches the disk from the kubelet's
// host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
    globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
    if err := cd.mounter.Unmount(globalPDPath); err != nil {
        return err
    }
    if err := os.Remove(globalPDPath); err != nil {
        return err
    }
    glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)

    cloud, err := cd.plugin.getCloudProvider()
    if err != nil {
        return err
    }
    instanceid, err := cloud.InstanceID()
    if err != nil {
        return err
    }
    if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
        return err
    }
    glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
    return nil
}

func (util *CinderDiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
    cloud, err := cd.plugin.getCloudProvider()
    if err != nil {
        return err
    }

    if err = cloud.DeleteVolume(cd.pdName); err != nil {
        // OpenStack cloud provider returns volume.tryAgainError when necessary,
        // no handling needed here.
        glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
        return err
    }
    glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
    return nil
}

func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
    cloud, err := c.plugin.getCloudProvider()
    if err != nil {
        return "", 0, err
    }

    capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    volSizeBytes := capacity.Value()
    // Cinder works with gigabytes, convert to GiB with rounding up
    volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
    name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
    vtype := ""
    availability := ""
    // Apply ProvisionerParameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    for k, v := range c.options.Parameters {
        switch strings.ToLower(k) {
        case "type":
            vtype = v
        case "availability":
            availability = v
        default:
            return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
        }
    }
    // TODO: implement PVC.Selector parsing
    if c.options.PVC.Spec.Selector != nil {
        return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
    }

    name, err = cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
    if err != nil {
        glog.V(2).Infof("Error creating cinder volume: %v", err)
        return "", 0, err
    }
    glog.V(2).Infof("Successfully created cinder volume %s", name)
    return name, volSizeGB, nil
}
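// probeAttachedVolume runs `udevadm trigger` so udev re-scans devices and a
// newly attached volume shows up under /dev.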
func probeAttachedVolume() error {
    executor := exec.New()
    args := []string{"trigger"}
    cmd := executor.Command("/usr/bin/udevadm", args...)
    _, err := cmd.CombinedOutput()
    if err != nil {
        glog.Errorf("error running udevadm trigger %v\n", err)
        return err
    }
    glog.V(4).Infof("Successfully probed all attachments")
    return nil
}
18
vendor/k8s.io/kubernetes/pkg/volume/cinder/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cinder contains the internal representation of cinder volumes.
package cinder // import "k8s.io/kubernetes/pkg/volume/cinder"
47
vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD
generated
vendored
Normal file
@@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "configmap.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["configmap_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,2 @@
maintainers:
- pmorie
289
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go
generated
vendored
Normal file
@@ -0,0 +1,289 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import (
    "fmt"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/types"
    ioutil "k8s.io/kubernetes/pkg/util/io"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&configMapPlugin{}}
}

const (
    configMapPluginName = "kubernetes.io/configmap"
)

// configMapPlugin implements the VolumePlugin interface.
type configMapPlugin struct {
    host volume.VolumeHost
}

var _ volume.VolumePlugin = &configMapPlugin{}

func (plugin *configMapPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}

func (plugin *configMapPlugin) GetPluginName() string {
    return configMapPluginName
}

func (plugin *configMapPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, _ := getVolumeSource(spec)
    if volumeSource == nil {
        return "", fmt.Errorf("Spec does not reference a ConfigMap volume type")
    }

    return fmt.Sprintf(
        "%v/%v",
        spec.Name(),
        volumeSource.Name), nil
}

func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool {
    return spec.Volume != nil && spec.Volume.ConfigMap != nil
}

func (plugin *configMapPlugin) RequiresRemount() bool {
    return true
}

func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
    return &configMapVolumeMounter{
        configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
        source:          *spec.Volume.ConfigMap,
        pod:             *pod,
        opts:            &opts}, nil
}

func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    return &configMapVolumeUnmounter{&configMapVolume{volName, podUID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}}}, nil
}

func (plugin *configMapPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
    configMapVolume := &v1.Volume{
        Name: volumeName,
        VolumeSource: v1.VolumeSource{
            ConfigMap: &v1.ConfigMapVolumeSource{},
        },
    }
    return volume.NewSpecFromVolume(configMapVolume), nil
}

type configMapVolume struct {
    volName string
    podUID  types.UID
    plugin  *configMapPlugin
    mounter mount.Interface
    writer  ioutil.Writer
    volume.MetricsNil
}

var _ volume.Volume = &configMapVolume{}

func (sv *configMapVolume) GetPath() string {
    return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName)
}

// configMapVolumeMounter handles retrieving configMap data from the API
// server and placing it into the volume on the host.
type configMapVolumeMounter struct {
    *configMapVolume

    source v1.ConfigMapVolumeSource
    pod    v1.Pod
    opts   *volume.VolumeOptions
}

var _ volume.Mounter = &configMapVolumeMounter{}

func (sv *configMapVolume) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        true,
        Managed:         true,
        SupportsSELinux: true,
    }
}

func wrappedVolumeSpec() volume.Spec {
    // This is the spec for the volume that this plugin wraps.
    return volume.Spec{
        // This should be on a tmpfs instead of the local disk; the problem is
        // charging the memory for the tmpfs to the right cgroup. We should make
        // this a tmpfs when we can do the accounting correctly.
        Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
    }
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *configMapVolumeMounter) CanMount() error {
    return nil
}

func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
    glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)

    // Wrap EmptyDir, let it do the setup.
    wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
    if err != nil {
        return err
    }
    if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
        return err
    }

    kubeClient := b.plugin.host.GetKubeClient()
    if kubeClient == nil {
        return fmt.Errorf("Cannot setup configMap volume %v because kube client is not configured", b.volName)
    }

    configMap, err := kubeClient.Core().ConfigMaps(b.pod.Namespace).Get(b.source.Name)
    if err != nil {
        glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err)
        return err
    }

    totalBytes := totalBytes(configMap)
    glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
        b.pod.Namespace,
        b.source.Name,
        len(configMap.Data),
        totalBytes)

    payload, err := makePayload(b.source.Items, configMap, b.source.DefaultMode)
    if err != nil {
        return err
    }

    writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
    writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
    if err != nil {
        glog.Errorf("Error creating atomic writer: %v", err)
        return err
    }

    err = writer.Write(payload)
    if err != nil {
        glog.Errorf("Error writing payload to dir: %v", err)
        return err
    }

    err = volume.SetVolumeOwnership(b, fsGroup)
    if err != nil {
        glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
        return err
    }

    return nil
}
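// makePayload projects configMap data into the file map consumed by the
// atomic writer, applying per-key modes and falling back to defaultMode.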
func makePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
    if defaultMode == nil {
        return nil, fmt.Errorf("no defaultMode used, not even the default value for it")
    }

    payload := make(map[string]volumeutil.FileProjection, len(configMap.Data))
    var fileProjection volumeutil.FileProjection

    if len(mappings) == 0 {
        for name, data := range configMap.Data {
            fileProjection.Data = []byte(data)
            fileProjection.Mode = *defaultMode
            payload[name] = fileProjection
        }
    } else {
        for _, ktp := range mappings {
            content, ok := configMap.Data[ktp.Key]
            if !ok {
                err := fmt.Errorf("references non-existent config key: %s", ktp.Key)
                glog.Error(err)
                return nil, err
            }

            fileProjection.Data = []byte(content)
            if ktp.Mode != nil {
                fileProjection.Mode = *ktp.Mode
            } else {
                fileProjection.Mode = *defaultMode
            }
            payload[ktp.Path] = fileProjection
        }
    }

    return payload, nil
}
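// totalBytes returns the combined size of all values in the configMap; it is
// used only for logging.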
func totalBytes(configMap *v1.ConfigMap) int {
    totalSize := 0
    for _, value := range configMap.Data {
        totalSize += len(value)
    }

    return totalSize
}

// configMapVolumeUnmounter handles cleaning up configMap volumes.
type configMapVolumeUnmounter struct {
    *configMapVolume
}

var _ volume.Unmounter = &configMapVolumeUnmounter{}

func (c *configMapVolumeUnmounter) TearDown() error {
    return c.TearDownAt(c.GetPath())
}

func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
    glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)

    // Wrap EmptyDir, let it do the teardown.
    wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
    if err != nil {
        return err
    }
    return wrapped.TearDownAt(dir)
}
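// getVolumeSource extracts the ConfigMapVolumeSource and read-only flag from
// a spec; the source is nil when the spec does not describe a configMap volume.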
func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
    var readOnly bool
    var volumeSource *v1.ConfigMapVolumeSource

    if spec.Volume != nil && spec.Volume.ConfigMap != nil {
        volumeSource = spec.Volume.ConfigMap
        readOnly = spec.ReadOnly
    }

    return volumeSource, readOnly
}
453
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap_test.go
generated
vendored
Normal file
@@ -0,0 +1,453 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "reflect"
    "strings"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/empty_dir"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util"
)

func TestMakePayload(t *testing.T) {
    caseMappingMode := int32(0400)
    cases := []struct {
        name      string
        mappings  []v1.KeyToPath
        configMap *v1.ConfigMap
        mode      int32
        payload   map[string]util.FileProjection
        success   bool
    }{
        {
            name: "no overrides",
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "foo": {Data: []byte("foo"), Mode: 0644},
                "bar": {Data: []byte("bar"), Mode: 0644},
            },
            success: true,
        },
        {
            name: "basic 1",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "path/to/foo.txt",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "path/to/foo.txt": {Data: []byte("foo"), Mode: 0644},
            },
            success: true,
        },
        {
            name: "subdirs",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "path/to/1/2/3/foo.txt",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
            },
            success: true,
        },
        {
            name: "subdirs 2",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "path/to/1/2/3/foo.txt",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
            },
            success: true,
        },
        {
            name: "subdirs 3",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "path/to/1/2/3/foo.txt",
                },
                {
                    Key:  "bar",
                    Path: "another/path/to/the/esteemed/bar.bin",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "path/to/1/2/3/foo.txt":                {Data: []byte("foo"), Mode: 0644},
                "another/path/to/the/esteemed/bar.bin": {Data: []byte("bar"), Mode: 0644},
            },
            success: true,
        },
        {
            name: "non existent key",
            mappings: []v1.KeyToPath{
                {
                    Key:  "zab",
                    Path: "path/to/foo.txt",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode:    0644,
            success: false,
        },
        {
            name: "mapping with Mode",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "foo.txt",
                    Mode: &caseMappingMode,
                },
                {
                    Key:  "bar",
                    Path: "bar.bin",
                    Mode: &caseMappingMode,
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "foo.txt": {Data: []byte("foo"), Mode: caseMappingMode},
                "bar.bin": {Data: []byte("bar"), Mode: caseMappingMode},
            },
            success: true,
        },
        {
            name: "mapping with defaultMode",
            mappings: []v1.KeyToPath{
                {
                    Key:  "foo",
                    Path: "foo.txt",
                },
                {
                    Key:  "bar",
                    Path: "bar.bin",
                },
            },
            configMap: &v1.ConfigMap{
                Data: map[string]string{
                    "foo": "foo",
                    "bar": "bar",
                },
            },
            mode: 0644,
            payload: map[string]util.FileProjection{
                "foo.txt": {Data: []byte("foo"), Mode: 0644},
                "bar.bin": {Data: []byte("bar"), Mode: 0644},
            },
            success: true,
        },
    }

    for _, tc := range cases {
        actualPayload, err := makePayload(tc.mappings, tc.configMap, &tc.mode)
        if err != nil && tc.success {
            t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
            continue
        }

        if err == nil && !tc.success {
            t.Errorf("%v: unexpected success making payload", tc.name)
            continue
        }

        if !tc.success {
            continue
        }

        if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: expected and actual payload do not match", tc.name)
        }
    }
}

func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
    tempDir, err := ioutil.TempDir("/tmp", "configmap_volume_test.")
    if err != nil {
        t.Fatalf("can't make a temp rootdir: %v", err)
    }

    return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
}

func TestCanSupport(t *testing.T) {
    pluginMgr := volume.VolumePluginMgr{}
    tempDir, host := newTestHost(t, nil)
    defer os.RemoveAll(tempDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

    plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plugin.GetPluginName() != configMapPluginName {
        t.Errorf("Wrong name: %s", plugin.GetPluginName())
    }
    if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: ""}}}}}) {
        t.Errorf("Expected true")
    }
    if plugin.CanSupport(&volume.Spec{}) {
        t.Errorf("Expected false")
    }
}

func TestPlugin(t *testing.T) {
    var (
        testPodUID     = types.UID("test_pod_uid")
        testVolumeName = "test_volume_name"
        testNamespace  = "test_configmap_namespace"
        testName       = "test_configmap_name"

        volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
        configMap     = configMap(testNamespace, testName)
        client        = fake.NewSimpleClientset(&configMap)
        pluginMgr     = volume.VolumePluginMgr{}
        tempDir, host = newTestHost(t, client)
    )

    defer os.RemoveAll(tempDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

    plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
    if err != nil {
        t.Errorf("Failed to GetVolumeName: %v", err)
    }
    if vName != "test_volume_name/test_configmap_name" {
        t.Errorf("Got unexpected VolumeName %v", vName)
    }

    volumePath := mounter.GetPath()
    if !strings.HasSuffix(volumePath, "pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name") {
        t.Errorf("Got unexpected path: %s", volumePath)
    }

    fsGroup := int64(1001)
    err = mounter.SetUp(&fsGroup)
    if err != nil {
        t.Errorf("Failed to setup volume: %v", err)
    }
    if _, err := os.Stat(volumePath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    doTestConfigMapDataInVolume(volumePath, configMap, t)
    doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}

// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the configMap data written to it.
func TestPluginReboot(t *testing.T) {
    var (
        testPodUID     = types.UID("test_pod_uid3")
        testVolumeName = "test_volume_name"
        testNamespace  = "test_configmap_namespace"
        testName       = "test_configmap_name"

        volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
        configMap     = configMap(testNamespace, testName)
        client        = fake.NewSimpleClientset(&configMap)
        pluginMgr     = volume.VolumePluginMgr{}
        rootDir, host = newTestHost(t, client)
    )

    defer os.RemoveAll(rootDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

    plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
    util.SetReady(podMetadataDir)
    volumePath := mounter.GetPath()
    if !strings.HasSuffix(volumePath, "pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name") {
        t.Errorf("Got unexpected path: %s", volumePath)
    }

    fsGroup := int64(1001)
    err = mounter.SetUp(&fsGroup)
    if err != nil {
        t.Errorf("Failed to setup volume: %v", err)
    }
    if _, err := os.Stat(volumePath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    doTestConfigMapDataInVolume(volumePath, configMap, t)
    doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
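// volumeSpec and configMap build the fixtures shared by the tests above.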
func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume {
    return &v1.Volume{
        Name: volumeName,
        VolumeSource: v1.VolumeSource{
            ConfigMap: &v1.ConfigMapVolumeSource{
                LocalObjectReference: v1.LocalObjectReference{
                    Name: configMapName,
                },
                DefaultMode: &defaultMode,
            },
        },
    }
}

func configMap(namespace, name string) v1.ConfigMap {
    return v1.ConfigMap{
        ObjectMeta: v1.ObjectMeta{
            Namespace: namespace,
            Name:      name,
        },
        Data: map[string]string{
            "data-1": "value-1",
            "data-2": "value-2",
            "data-3": "value-3",
        },
    }
}
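// doTestConfigMapDataInVolume verifies that every configMap key was written
// to the volume path with the expected contents.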
func doTestConfigMapDataInVolume(volumePath string, configMap v1.ConfigMap, t *testing.T) {
    for key, value := range configMap.Data {
        configMapDataHostPath := path.Join(volumePath, key)
        if _, err := os.Stat(configMapDataHostPath); err != nil {
            t.Fatalf("SetUp() failed, couldn't find configMap data on disk: %v", configMapDataHostPath)
        } else {
            actualValue, err := ioutil.ReadFile(configMapDataHostPath)
            if err != nil {
                t.Fatalf("Couldn't read configMap data from: %v", configMapDataHostPath)
            }

            if value != string(actualValue) {
                t.Errorf("Unexpected value; expected %q, got %q", value, actualValue)
            }
        }
    }
}
func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
|
||||
unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
if unmounter == nil {
|
||||
t.Errorf("Got a nil Unmounter")
|
||||
}
|
||||
|
||||
if err := unmounter.TearDown(); err != nil {
|
||||
t.Errorf("Expected success, got: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(volumePath); err == nil {
|
||||
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
|
||||
} else if !os.IsNotExist(err) {
|
||||
t.Errorf("SetUp() failed: %v", err)
|
||||
}
|
||||
}
|
||||
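The reboot case above leans on the plugin "ready file": util.SetReady drops a sentinel file in the volume's metadata directory, and SetUp consults it to decide whether work remains. A minimal sketch of the pair, assuming the same k8s.io/kubernetes/pkg/volume/util helpers the test imports (the path is illustrative):

metaDir := rootDir + "/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name"
util.SetReady(metaDir) // creates the sentinel file; it survives a reboot
if util.IsReady(metaDir) {
	// After a reboot the sentinel still exists but the volume dir is no
	// longer a mountpoint, which is exactly the state TestPluginReboot builds.
}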
18 vendor/k8s.io/kubernetes/pkg/volume/configmap/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package configmap contains the internal representation of configMap volumes.
package configmap // import "k8s.io/kubernetes/pkg/volume/configmap"
19 vendor/k8s.io/kubernetes/pkg/volume/doc.go generated vendored Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package volume includes internal representations of external volume types
// as well as utility methods required to mount/unmount volumes to kubelets.
package volume // import "k8s.io/kubernetes/pkg/volume"
45 vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD generated vendored Normal file
@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = ["downwardapi.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/errors:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["downwardapi_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
3 vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS generated vendored Normal file
@@ -0,0 +1,3 @@
maintainers:
- pmorie
- sdminonne
299 vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go generated vendored Normal file
@@ -0,0 +1,299 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package downwardapi

import (
	"fmt"
	"path"
	"sort"
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/fieldpath"
	"k8s.io/kubernetes/pkg/types"
	utilerrors "k8s.io/kubernetes/pkg/util/errors"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"

	"github.com/golang/glog"
)

// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&downwardAPIPlugin{}}
}

const (
	downwardAPIPluginName = "kubernetes.io/downward-api"
)

// downwardAPIPlugin implements the VolumePlugin interface.
type downwardAPIPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &downwardAPIPlugin{}

func wrappedVolumeSpec() volume.Spec {
	return volume.Spec{
		Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
	}
}

func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *downwardAPIPlugin) GetPluginName() string {
	return downwardAPIPluginName
}

func (plugin *downwardAPIPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a DownwardAPI volume type")
	}

	// Return user defined volume name, since this is an ephemeral volume type
	return spec.Name(), nil
}

func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
	return spec.Volume != nil && spec.Volume.DownwardAPI != nil
}

func (plugin *downwardAPIPlugin) RequiresRemount() bool {
	return true
}

func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	v := &downwardAPIVolume{
		volName: spec.Name(),
		items:   spec.Volume.DownwardAPI.Items,
		pod:     pod,
		podUID:  pod.UID,
		plugin:  plugin,
	}
	return &downwardAPIVolumeMounter{
		downwardAPIVolume: v,
		source:            *spec.Volume.DownwardAPI,
		opts:              &opts,
	}, nil
}

func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &downwardAPIVolumeUnmounter{
		&downwardAPIVolume{
			volName: volName,
			podUID:  podUID,
			plugin:  plugin,
		},
	}, nil
}

func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	downwardAPIVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(downwardAPIVolume), nil
}

// downwardAPIVolume retrieves downward API data and places it into the volume on the host.
type downwardAPIVolume struct {
	volName string
	items   []v1.DownwardAPIVolumeFile
	pod     *v1.Pod
	podUID  types.UID // TODO: remove this redundancy as soon as NewUnmounter has *v1.Pod and not only types.UID
	plugin  *downwardAPIPlugin
	volume.MetricsNil
}

// downwardAPIVolumeMounter fetches info from downward API from the pod
// and dumps it in files
type downwardAPIVolumeMounter struct {
	*downwardAPIVolume
	source v1.DownwardAPIVolumeSource
	opts   *volume.VolumeOptions
}

// downwardAPIVolumeMounter implements volume.Mounter interface
var _ volume.Mounter = &downwardAPIVolumeMounter{}

// downward API volumes are always ReadOnlyManaged
func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        true,
		Managed:         true,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *downwardAPIVolumeMounter) CanMount() error {
	return nil
}

// SetUp puts in place the volume plugin.
// This function is not idempotent by design. We want the data to be refreshed periodically.
// The internal sync interval of kubelet will drive the refresh of data.
// TODO: Add volume specific ticker and refresh loop
func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
	// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
	if err != nil {
		glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}
	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}

	data, err := b.collectData(b.source.DefaultMode)
	if err != nil {
		glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}

	writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
	if err != nil {
		glog.Errorf("Error creating atomic writer: %v", err)
		return err
	}

	err = writer.Write(data)
	if err != nil {
		glog.Errorf("Error writing payload to dir: %v", err)
		return err
	}

	err = volume.SetVolumeOwnership(b, fsGroup)
	if err != nil {
		glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
		return err
	}

	return nil
}

// collectData collects requested downwardAPI in data map.
// Map's key is the requested name of file to dump
// Map's value is the (sorted) content of the field to be dumped in the file.
func (d *downwardAPIVolume) collectData(defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
	if defaultMode == nil {
		return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
	}

	errlist := []error{}
	data := make(map[string]volumeutil.FileProjection)
	for _, fileInfo := range d.items {
		var fileProjection volumeutil.FileProjection
		fPath := path.Clean(fileInfo.Path)
		if fileInfo.Mode != nil {
			fileProjection.Mode = *fileInfo.Mode
		} else {
			fileProjection.Mode = *defaultMode
		}
		if fileInfo.FieldRef != nil {
			// TODO: unify with Kubelet.podFieldSelectorRuntimeValue
			if values, err := fieldpath.ExtractFieldPathAsString(d.pod, fileInfo.FieldRef.FieldPath); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
				errlist = append(errlist, err)
			} else {
				fileProjection.Data = []byte(sortLines(values))
			}
		} else if fileInfo.ResourceFieldRef != nil {
			containerName := fileInfo.ResourceFieldRef.ContainerName
			nodeAllocatable, err := d.plugin.host.GetNodeAllocatable()
			if err != nil {
				errlist = append(errlist, err)
			} else if values, err := fieldpath.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, d.pod, containerName, nodeAllocatable); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
				errlist = append(errlist, err)
			} else {
				fileProjection.Data = []byte(sortLines(values))
			}
		}

		data[fPath] = fileProjection
	}
	return data, utilerrors.NewAggregate(errlist)
}

// sortLines sorts the strings generated from map based data
// (annotations and labels)
func sortLines(values string) string {
	splitted := strings.Split(values, "\n")
	sort.Strings(splitted)
	return strings.Join(splitted, "\n")
}
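Because labels and annotations come out of a Go map, their line order is not stable between refreshes; sortLines pins it down so the projected file content is deterministic. An illustrative call (the label values are made up):

lines := "zone=\"us-east1\"\napp=\"nginx\""
fmt.Println(sortLines(lines))
// Output:
// app="nginx"
// zone="us-east1"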

func (d *downwardAPIVolume) GetPath() string {
	return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}

// downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
type downwardAPIVolumeUnmounter struct {
	*downwardAPIVolume
}

// downwardAPIVolumeUnmounter implements volume.Unmounter interface
var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}

func (c *downwardAPIVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)

	// Wrap EmptyDir, let it do the teardown.
	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
	if err != nil {
		return err
	}
	return wrapped.TearDownAt(dir)
}

func (b *downwardAPIVolumeMounter) getMetaDir() string {
	return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}

func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.DownwardAPIVolumeSource

	if spec.Volume != nil && spec.Volume.DownwardAPI != nil {
		volumeSource = spec.Volume.DownwardAPI
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
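For context, a hedged sketch of the input collectData consumes: a DownwardAPIVolumeSource whose items select either a pod field or a container resource. The file paths and container name below are illustrative, not taken from this diff:

mode := int32(0644)
src := v1.DownwardAPIVolumeSource{
	DefaultMode: &mode,
	Items: []v1.DownwardAPIVolumeFile{
		// Projected via FieldRef: the file "labels" holds sorted metadata.labels lines.
		{Path: "labels", FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"}},
		// Projected via ResourceFieldRef; falls back to node allocatable when no limit is set.
		{Path: "cpu_limit", ResourceFieldRef: &v1.ResourceFieldSelector{
			ContainerName: "client-container",
			Resource:      "limits.cpu",
		}},
	},
}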
837 vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi_test.go generated vendored Normal file
(File diff suppressed because it is too large.)
46 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD generated vendored Normal file
@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "empty_dir.go",
        "empty_dir_linux.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["empty_dir_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
    ],
)
4 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS generated vendored Normal file
@@ -0,0 +1,4 @@
maintainers:
- pmorie
- saad-ali
- thockin
19 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go generated vendored Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package empty_dir contains the internal representation of emptyDir
// volumes.
package empty_dir // import "k8s.io/kubernetes/pkg/volume/empty_dir"
354 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go generated vendored Normal file
@@ -0,0 +1,354 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"fmt"
	"os"
	"path"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// TODO: in the near future, this will be changed to be more restrictive
// and the group will be set to allow containers to use emptyDir volumes
// from the group attribute.
//
// http://issue.k8s.io/2630
const perm os.FileMode = 0777

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{
		&emptyDirPlugin{nil},
	}
}

type emptyDirPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &emptyDirPlugin{}

const (
	emptyDirPluginName = "kubernetes.io/empty-dir"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(emptyDirPluginName), volName)
}
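EscapeQualifiedNameForDisk turns the "/" in the plugin name into "~" so the name can serve as a single path segment; that is why every pod volume path in the tests below reads kubernetes.io~empty-dir. A sketch of the resulting layout, where host stands in for any volume.VolumeHost:

// For pod UID "poduid" and volume "vol1" the path works out to:
//   <rootDir>/pods/poduid/volumes/kubernetes.io~empty-dir/vol1
p := getPath(types.UID("poduid"), "vol1", host)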

func (plugin *emptyDirPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host

	return nil
}

func (plugin *emptyDirPlugin) GetPluginName() string {
	return emptyDirPluginName
}

func (plugin *emptyDirPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference an EmptyDir volume type")
	}

	// Return user defined volume name, since this is an ephemeral volume type
	return spec.Name(), nil
}

func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool {
	if spec.Volume != nil && spec.Volume.EmptyDir != nil {
		return true
	}
	return false
}

func (plugin *emptyDirPlugin) RequiresRemount() bool {
	return false
}

func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts)
}

func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
	medium := v1.StorageMediumDefault
	if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
		medium = spec.Volume.EmptyDir.Medium
	}
	return &emptyDir{
		pod:             pod,
		volName:         spec.Name(),
		medium:          medium,
		mounter:         mounter,
		mountDetector:   mountDetector,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host)),
	}, nil
}

func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()})
}

func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {
	ed := &emptyDir{
		pod:             &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
		volName:         volName,
		medium:          v1.StorageMediumDefault, // might be changed later
		mounter:         mounter,
		mountDetector:   mountDetector,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsDu(getPath(podUID, volName, plugin.host)),
	}
	return ed, nil
}

func (plugin *emptyDirPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	emptyDirVolume := &v1.Volume{
		Name: volName,
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(emptyDirVolume), nil
}

// mountDetector abstracts how to find what kind of mount a path is backed by.
type mountDetector interface {
	// GetMountMedium determines what type of medium a given path is backed
	// by and whether that path is a mount point. For example, if this
	// returns (mediumMemory, false, nil), the caller knows that the path is
	// on a memory FS (tmpfs on Linux) but is not the root mountpoint of
	// that tmpfs.
	GetMountMedium(path string) (storageMedium, bool, error)
}

type storageMedium int

const (
	mediumUnknown storageMedium = 0 // assume anything we don't explicitly handle is this
	mediumMemory  storageMedium = 1 // memory (e.g. tmpfs on linux)
)

// EmptyDir volumes are temporary directories exposed to the pod.
// These do not persist beyond the lifetime of a pod.
type emptyDir struct {
	pod           *v1.Pod
	volName       string
	medium        v1.StorageMedium
	mounter       mount.Interface
	mountDetector mountDetector
	plugin        *emptyDirPlugin
	volume.MetricsProvider
}

func (ed *emptyDir) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        false,
		Managed:         true,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *emptyDir) CanMount() error {
	return nil
}

// SetUp creates new directory.
func (ed *emptyDir) SetUp(fsGroup *int64) error {
	return ed.SetUpAt(ed.GetPath(), fsGroup)
}

// SetUpAt creates new directory.
func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := ed.mounter.IsLikelyNotMountPoint(dir)
	// Getting an os.IsNotExist err here is acceptable; the directory
	// may not exist yet, in which case, setup should run.
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the plugin readiness file is present for this volume, and the
	// storage medium is the default, then the volume is ready. If the
	// medium is memory, and a mountpoint is present, then the volume is
	// ready.
	if volumeutil.IsReady(ed.getMetaDir()) {
		if ed.medium == v1.StorageMediumMemory && !notMnt {
			return nil
		} else if ed.medium == v1.StorageMediumDefault {
			return nil
		}
	}

	switch ed.medium {
	case v1.StorageMediumDefault:
		err = ed.setupDir(dir)
	case v1.StorageMediumMemory:
		err = ed.setupTmpfs(dir)
	default:
		err = fmt.Errorf("unknown storage medium %q", ed.medium)
	}

	volume.SetVolumeOwnership(ed, fsGroup)

	if err == nil {
		volumeutil.SetReady(ed.getMetaDir())
	}

	return err
}

// setupTmpfs creates a tmpfs mount at the specified directory with the
// specified SELinux context.
func (ed *emptyDir) setupTmpfs(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.setupDir(dir); err != nil {
		return err
	}
	// Make SetUp idempotent.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	// If the directory is a mountpoint with medium memory, there is no
	// work to do since we are already in the desired state.
	if isMnt && medium == mediumMemory {
		return nil
	}

	glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName)
	return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */)
}

// setupDir creates the directory with the specified SELinux context and
// the default permissions specified by the perm constant.
func (ed *emptyDir) setupDir(dir string) error {
	// Create the directory if it doesn't already exist.
	if err := os.MkdirAll(dir, perm); err != nil {
		return err
	}

	// stat the directory to read permission bits
	fileinfo, err := os.Lstat(dir)
	if err != nil {
		return err
	}

	if fileinfo.Mode().Perm() != perm.Perm() {
		// If the permissions on the created directory are wrong, the
		// kubelet is probably running with a umask set. In order to
		// avoid clearing the umask for the entire process or locking
		// the thread, clearing the umask, creating the dir, restoring
		// the umask, and unlocking the thread, we do a chmod to set
		// the specific bits we need.
		err := os.Chmod(dir, perm)
		if err != nil {
			return err
		}

		fileinfo, err = os.Lstat(dir)
		if err != nil {
			return err
		}

		if fileinfo.Mode().Perm() != perm.Perm() {
			glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm())
		}
	}

	return nil
}
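The chmod fallback in setupDir exists because os.MkdirAll is subject to the process umask while os.Chmod is not. A self-contained, Linux-only sketch of the effect (syscall.Umask and the demo path are illustrative):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	old := syscall.Umask(0022) // a typical daemon umask
	defer syscall.Umask(old)

	dir := "/tmp/emptydir-umask-demo"
	os.MkdirAll(dir, 0777) // umask strips group/other write bits: dir ends up 0755
	fi, _ := os.Lstat(dir)
	fmt.Println(fi.Mode().Perm()) // -rwxr-xr-x

	os.Chmod(dir, 0777) // chmod ignores the umask, restoring 0777
	fi, _ = os.Lstat(dir)
	fmt.Println(fi.Mode().Perm()) // -rwxrwxrwx
}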

func (ed *emptyDir) GetPath() string {
	return getPath(ed.pod.UID, ed.volName, ed.plugin.host)
}

// TearDown simply discards everything in the directory.
func (ed *emptyDir) TearDown() error {
	return ed.TearDownAt(ed.GetPath())
}

// TearDownAt simply discards everything in the directory.
func (ed *emptyDir) TearDownAt(dir string) error {
	// Figure out the medium.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	if isMnt && medium == mediumMemory {
		ed.medium = v1.StorageMediumMemory
		return ed.teardownTmpfs(dir)
	}
	// assume StorageMediumDefault
	return ed.teardownDefault(dir)
}

func (ed *emptyDir) teardownDefault(dir string) error {
	tmpDir, err := volume.RenameDirectory(dir, ed.volName+".deleting~")
	if err != nil {
		return err
	}
	err = os.RemoveAll(tmpDir)
	if err != nil {
		return err
	}
	return nil
}

func (ed *emptyDir) teardownTmpfs(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.mounter.Unmount(dir); err != nil {
		return err
	}
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	return nil
}

func (ed *emptyDir) getMetaDir() string {
	return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, strings.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName)
}

func getVolumeSource(spec *volume.Spec) (*v1.EmptyDirVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.EmptyDirVolumeSource

	if spec.Volume != nil && spec.Volume.EmptyDir != nil {
		volumeSource = spec.Volume.EmptyDir
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
53 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go generated vendored Normal file
@@ -0,0 +1,53 @@
// +build linux

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"fmt"
	"syscall"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/mount"
)

// Defined by Linux - the type number for tmpfs mounts.
const linuxTmpfsMagic = 0x01021994

// realMountDetector implements mountDetector in terms of syscalls.
type realMountDetector struct {
	mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
	glog.V(5).Infof("Determining mount medium of %v", path)
	notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
	if err != nil {
		return 0, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
	}
	buf := syscall.Statfs_t{}
	if err := syscall.Statfs(path, &buf); err != nil {
		return 0, false, fmt.Errorf("statfs(%q): %v", path, err)
	}

	glog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
	if buf.Type == linuxTmpfsMagic {
		return mediumMemory, !notMnt, nil
	}
	return mediumUnknown, !notMnt, nil
}
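The tmpfs detection boils down to comparing Statfs_t.Type against TMPFS_MAGIC from linux/magic.h. A standalone sketch, assuming /dev/shm is a tmpfs mount on the host:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var buf syscall.Statfs_t
	if err := syscall.Statfs("/dev/shm", &buf); err != nil {
		panic(err)
	}
	// 0x01021994 is TMPFS_MAGIC, the same constant used above.
	fmt.Println("tmpfs:", buf.Type == 0x01021994)
}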
274 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_test.go generated vendored Normal file
@@ -0,0 +1,274 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"os"
	"path"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

// Construct an instance of a plugin, by name.
func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(basePath, nil, nil))

	plug, err := plugMgr.FindPluginByName(plugName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	return plug
}

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)

	if plug.GetPluginName() != "kubernetes.io/empty-dir" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

type fakeMountDetector struct {
	medium  storageMedium
	isMount bool
}

func (fake *fakeMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
	return fake.medium, fake.isMount, nil
}

func TestPluginEmptyRootContext(t *testing.T) {
	doTestPlugin(t, pluginTestConfig{
		medium:                 v1.StorageMediumDefault,
		expectedSetupMounts:    0,
		expectedTeardownMounts: 0})
}

type pluginTestConfig struct {
	medium                        v1.StorageMedium
	idempotent                    bool
	expectedSetupMounts           int
	shouldBeMountedBeforeTeardown bool
	expectedTeardownMounts        int
}

// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
		volumeName = "test-volume"
		spec       = &v1.Volume{
			Name:         volumeName,
			VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	)

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}
		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able
		// to stat the volume path, it's an error.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := mediumUnknown
	if config.medium == v1.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}

func TestPluginBackCompat(t *testing.T) {
	basePath, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(basePath)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)

	spec := &v1.Volume{
		Name: "vol1",
	}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}

// TestMetrics tests that MetricProvider methods return sane values.
func TestMetrics(t *testing.T) {
	// Create an empty temp directory for the volume
	tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
	if err != nil {
		t.Fatalf("Can't make a tmp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)

	spec := &v1.Volume{
		Name: "vol1",
	}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}

	// Need to create the subdirectory
	os.MkdirAll(mounter.GetPath(), 0755)

	expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
	if err != nil {
		t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
	}

	// TODO(pwittroc): Move this into a reusable testing utility
	metrics, err := mounter.GetMetrics()
	if err != nil {
		t.Errorf("Unexpected error when calling GetMetrics %v", err)
	}
	if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
		t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
	}
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("Expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("Expected Available to be greater than 0")
	}
}
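The Used value asserted above comes from the MetricsDu provider that newMounterInternal wires in. It can be exercised directly against any directory; the path below is illustrative:

provider := volume.NewMetricsDu("/var/lib/kubelet/pods/poduid/volumes/kubernetes.io~empty-dir/vol1")
if m, err := provider.GetMetrics(); err == nil {
	// Used, Capacity, and Available are resource.Quantity values.
	fmt.Printf("used=%v capacity=%v available=%v\n", m.Used, m.Capacity, m.Available)
}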
32 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go generated vendored Normal file
@@ -0,0 +1,32 @@
// +build !linux

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"k8s.io/kubernetes/pkg/util/mount"
)

// realMountDetector pretends to implement mountDetector.
type realMountDetector struct {
	mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
	return mediumUnknown, false, nil
}
50 vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD generated vendored Normal file
@@ -0,0 +1,50 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "disk_manager.go",
        "doc.go",
        "fc.go",
        "fc_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "fc_test.go",
        "fc_util_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2 vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS generated vendored Normal file
@@ -0,0 +1,2 @@
assignees:
- rootfs
118 vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go generated vendored Normal file
@@ -0,0 +1,118 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"os"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
)

// Abstract interface to disk operations.
type diskManager interface {
	MakeGlobalPDName(disk fcDisk) string
	// Attaches the disk to the kubelet's host machine.
	AttachDisk(b fcDiskMounter) error
	// Detaches the disk from the kubelet's host machine.
	DetachDisk(disk fcDiskUnmounter, mntPath string) error
}
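Because diskManager is an unexported interface, tests in package fc can swap in a stub instead of the real FCUtil. A hypothetical no-op implementation, not part of this diff:

type fakeDiskManager struct{}

func (f *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
	return "/tmp/fake_fc_path/" + disk.volName
}

func (f *fakeDiskManager) AttachDisk(b fcDiskMounter) error { return nil }

func (f *fakeDiskManager) DetachDisk(disk fcDiskUnmounter, mntPath string) error { return nil }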

// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
	globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
	// TODO: handle failed mounts here.
	noMnt, err := mounter.IsLikelyNotMountPoint(volPath)

	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mountpoint: %s", volPath)
		return err
	}
	if !noMnt {
		return nil
	}
	if err := manager.AttachDisk(b); err != nil {
		glog.Errorf("failed to attach disk")
		return err
	}

	if err := os.MkdirAll(volPath, 0750); err != nil {
		glog.Errorf("failed to mkdir: %s", volPath)
		return err
	}
	// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = mounter.Mount(globalPDPath, volPath, "", options)
	if err != nil {
		glog.Errorf("failed to bind mount: %s", globalPDPath)
		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(&b, fsGroup)
	}

	return nil
}

// utility to tear down a disk based filesystem
func diskTearDown(manager diskManager, c fcDiskUnmounter, volPath string, mounter mount.Interface) error {
	noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
	if err != nil {
		glog.Errorf("cannot validate mountpoint %s", volPath)
		return err
	}
	if noMnt {
		return os.Remove(volPath)
	}

	refs, err := mount.GetMountRefs(mounter, volPath)
	if err != nil {
		glog.Errorf("failed to get reference count %s", volPath)
		return err
	}
	if err := mounter.Unmount(volPath); err != nil {
		glog.Errorf("failed to unmount %s", volPath)
		return err
	}
	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		mntPath := refs[0]
		if err := manager.DetachDisk(c, mntPath); err != nil {
			glog.Errorf("failed to detach disk from %s", mntPath)
			return err
		}
	}

	noMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
	if mntErr != nil {
		glog.Errorf("isMountpoint check failed: %v", mntErr)
		return mntErr // originally `return err`, which is always nil at this point
	}
	if noMnt {
		if err := os.Remove(volPath); err != nil {
			return err
		}
	}
	return nil
}
19 vendor/k8s.io/kubernetes/pkg/volume/fc/doc.go generated vendored Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package fc contains the internal representation of
// Fibre Channel (fc) volumes.
package fc // import "k8s.io/kubernetes/pkg/volume/fc"
237 vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go generated vendored Normal file
@@ -0,0 +1,237 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"strconv"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&fcPlugin{nil, exec.New()}}
}

type fcPlugin struct {
	host volume.VolumeHost
	exe  exec.Interface
}

var _ volume.VolumePlugin = &fcPlugin{}
var _ volume.PersistentVolumePlugin = &fcPlugin{}

const (
	fcPluginName = "kubernetes.io/fc"
)

func (plugin *fcPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *fcPlugin) GetPluginName() string {
	return fcPluginName
}

func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	// TargetWWNs are the FibreChannel target worldwide names
	return fmt.Sprintf("%v", volumeSource.TargetWWNs), nil
}
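So an FC volume's name is simply the formatted WWN slice, meaning two specs with the same targets collapse to the same name. Illustratively (the WWN is made up):

wwns := []string{"500a0982991b8dc5"}
name := fmt.Sprintf("%v", wwns) // "[500a0982991b8dc5]"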

func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
	if (spec.Volume != nil && spec.Volume.FC == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC == nil) {
		return false
	}

	return true
}

func (plugin *fcPlugin) RequiresRemount() bool {
	return false
}

func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
	}
}

func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
}

func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(int(*fc.Lun))

	return &fcDiskMounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}

func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter())
}

func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &fcDiskUnmounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: volName,
			manager: manager,
			plugin:  plugin,
			io:      &osIOHandler{},
		},
		mounter: mounter,
	}, nil
}

func (plugin *fcPlugin) execCommand(command string, args []string) ([]byte, error) {
	cmd := plugin.exe.Command(command, args...)
	return cmd.CombinedOutput()
}

func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	fcVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(fcVolume), nil
}

type fcDisk struct {
	volName string
	podUID  types.UID
	portal  string
	wwns    []string
	lun     string
	plugin  *fcPlugin
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager diskManager
	// io handler interface
	io ioHandler
	volume.MetricsNil
}

func (fc *fcDisk) GetPath() string {
	name := fcPluginName
	// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
	return fc.plugin.host.GetPodVolumeDir(fc.podUID, strings.EscapeQualifiedNameForDisk(name), fc.volName)
}

type fcDiskMounter struct {
	*fcDisk
	readOnly bool
	fsType   string
	mounter  *mount.SafeFormatAndMount
}

var _ volume.Mounter = &fcDiskMounter{}

func (b *fcDiskMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *fcDiskMounter) CanMount() error {
	return nil
}

func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
	// diskSetUp checks mountpoints and prevents repeated calls
	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
	if err != nil {
		glog.Errorf("fc: failed to setup")
	}
	return err
}
|
||||
type fcDiskUnmounter struct {
|
||||
*fcDisk
|
||||
mounter mount.Interface
|
||||
}
|
||||
|
||||
var _ volume.Unmounter = &fcDiskUnmounter{}
|
||||
|
||||
// Unmounts the bind mount, and detaches the disk only if the disk
|
||||
// resource was the last reference to that disk on the kubelet.
|
||||
func (c *fcDiskUnmounter) TearDown() error {
|
||||
return c.TearDownAt(c.GetPath())
|
||||
}
|
||||
|
||||
func (c *fcDiskUnmounter) TearDownAt(dir string) error {
|
||||
return diskTearDown(c.manager, *c, dir, c.mounter)
|
||||
}
|
||||
|
||||
func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
|
||||
if spec.Volume != nil && spec.Volume.FC != nil {
|
||||
return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.FC != nil {
|
||||
return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
|
||||
}
|
||||
|
||||
return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
|
||||
}
|
||||
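fc.go builds on a diskManager and an ioHandler defined elsewhere in this package (disk_manager.go is not part of this hunk; ioHandler appears in fc_util.go below). For orientation, here is a minimal sketch of the diskManager contract as inferred from the call sites above and from the fake manager in fc_test.go below; it is an illustration, not the vendored definition:

// Sketch only: inferred from usage in fc.go and the fake in fc_test.go.
// The authoritative definition lives in disk_manager.go (not in this hunk).
type diskManager interface {
	// MakeGlobalPDName returns the global mount point for the disk, shared
	// by all pods that use it (compare makePDNameInternal in fc_util.go).
	MakeGlobalPDName(disk fcDisk) string
	// AttachDisk discovers the device for the mounter's WWNs/LUN and
	// mounts it at the global path.
	AttachDisk(b fcDiskMounter) error
	// DetachDisk unmounts mntPath and releases the disk.
	DetachDisk(disk fcDiskUnmounter, mntPath string) error
}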
288
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go
generated
vendored
Normal file
@@ -0,0 +1,288 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"os"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/fc" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
		t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
	}
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

type fakeDiskManager struct {
	tmpDir       string
	attachCalled bool
	detachCalled bool
}

func NewFakeDiskManager() *fakeDiskManager {
	return &fakeDiskManager{
		tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
	}
}

func (fake *fakeDiskManager) Cleanup() {
	os.RemoveAll(fake.tmpDir)
}

func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
	return fake.tmpDir
}

func (fake *fakeDiskManager) AttachDisk(b fcDiskMounter) error {
	globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
	err := os.MkdirAll(globalPath, 0750)
	if err != nil {
		return err
	}
	// Simulate the global mount so that the fakeMounter returns the
	// expected number of mounts for the attached disk.
	b.mounter.Mount(globalPath, globalPath, b.fsType, nil)

	fake.attachCalled = true
	return nil
}

func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error {
	globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
	err := os.RemoveAll(globalPath)
	if err != nil {
		return err
	}
	fake.detachCalled = true
	return nil
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fakeManager := NewFakeDiskManager()
	defer fakeManager.Cleanup()
	fakeMounter := &mount.FakeMounter{}
	mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter: %v", err)
	}

	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if !fakeManager.attachCalled {
		t.Errorf("Attach was not called")
	}

	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
	unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter: %v", err)
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
	if !fakeManager2.detachCalled {
		t.Errorf("Detach was not called")
	}
}

func TestPluginVolume(t *testing.T) {
	lun := int32(0)
	vol := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{
				TargetWWNs: []string{"some_wwn"},
				FSType:     "ext4",
				Lun:        &lun,
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolume(t *testing.T) {
	lun := int32(0)
	vol := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	lun := int32(0)
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(fcPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
202
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/volume"
)

type ioHandler interface {
	ReadDir(dirname string) ([]os.FileInfo, error)
	Lstat(name string) (os.FileInfo, error)
	EvalSymlinks(path string) (string, error)
	WriteFile(filename string, data []byte, perm os.FileMode) error
}

type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return ioutil.WriteFile(filename, data, perm)
}

// given a disk path like /dev/sdx, find the devicemapper parent
// TODO #23192 Convert this code to use the generic code in ../util
// which is used by the iSCSI implementation
func findMultipathDeviceMapper(disk string, io ioHandler) string {
	sys_path := "/sys/block/"
	if dirs, err := io.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if strings.HasPrefix(name, "dm-") {
				if _, err1 := io.Lstat(sys_path + name + "/slaves/" + disk); err1 == nil {
					return "/dev/" + name
				}
			}
		}
	}
	return ""
}

// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler) (string, string) {
	fc_path := "-fc-0x" + wwn + "-lun-" + lun
	dev_path := "/dev/disk/by-path/"
	if dirs, err := io.ReadDir(dev_path); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if strings.Contains(name, fc_path) {
				if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
					arr := strings.Split(disk, "/")
					l := len(arr) - 1
					dev := arr[l]
					dm := findMultipathDeviceMapper(dev, io)
					return disk, dm
				}
			}
		}
	}
	return "", ""
}

func createMultipathConf(path string, io ioHandler) {
	if _, err := os.Lstat(path); err != nil {
		data := []byte(`defaults {
	find_multipaths yes
	user_friendly_names yes
}


blacklist {
}
`)
		io.WriteFile(path, data, 0664)
	}
}

// rescan scsi bus
func scsiHostRescan(io ioHandler) {
	scsi_path := "/sys/class/scsi_host/"
	if dirs, err := io.ReadDir(scsi_path); err == nil {
		for _, f := range dirs {
			name := scsi_path + f.Name() + "/scan"
			data := []byte("- - -")
			io.WriteFile(name, data, 0666)
		}
	}
}

// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/fc/target-lun-0
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string) string {
	return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
}

type FCUtil struct{}

func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
	return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun)
}

func searchDisk(wwns []string, lun string, io ioHandler) (string, string) {
	disk := ""
	dm := ""

	rescanned := false
	// two-phase search:
	// first phase, search the existing device paths; if a multipath dm is found, exit the loop.
	// otherwise, in the second phase, rescan the scsi bus and search again, returning any findings.
	for {
		for _, wwn := range wwns {
			disk, dm = findDisk(wwn, lun, io)
			// if a multipath device is found, break
			if dm != "" {
				break
			}
		}
		// if a dm is found, exit the loop
		if rescanned || dm != "" {
			break
		}
		// rescan and search again
		// create multipath conf if it is not there
		createMultipathConf("/etc/multipath.conf", io)
		// rescan scsi bus
		scsiHostRescan(io)
		rescanned = true
	}
	return disk, dm
}

func (util *FCUtil) AttachDisk(b fcDiskMounter) error {
	devicePath := ""
	wwns := b.wwns
	lun := b.lun
	io := b.io
	disk, dm := searchDisk(wwns, lun, io)
	// if no disk matches the input wwn and lun, exit
	if disk == "" && dm == "" {
		return fmt.Errorf("no fc disk found")
	}

	// if a multipath devicemapper device is found, use it; otherwise use the raw disk
	if dm != "" {
		devicePath = dm
	} else {
		devicePath = disk
	}
	// mount it
	globalPDPath := b.manager.MakeGlobalPDName(*b.fcDisk)
	noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if !noMnt {
		glog.Infof("fc: %s already mounted", globalPDPath)
		return nil
	}

	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
		return fmt.Errorf("fc: failed to mkdir %s, error: %v", globalPDPath, err)
	}

	err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
	if err != nil {
		return fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
	}

	return err
}

func (util *FCUtil) DetachDisk(c fcDiskUnmounter, mntPath string) error {
	if err := c.mounter.Unmount(mntPath); err != nil {
		return fmt.Errorf("fc detach disk: failed to unmount: %s\nError: %v", mntPath, err)
	}
	return nil
}
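findDisk above relies on udev's by-path links: a fibre channel LUN shows up under /dev/disk/by-path/ with a name whose suffix encodes the target WWN and LUN, and the symlink resolves to the real /dev/sdX node. A small self-contained sketch of the naming convention being matched (the PCI prefix is illustrative only; findDisk matches just the suffix):

package main

import "fmt"

func main() {
	wwn, lun := "500a0981891b8dc5", "0"
	// findDisk looks for any /dev/disk/by-path/ entry containing this
	// suffix, then EvalSymlinks resolves it to the underlying block device.
	suffix := "-fc-0x" + wwn + "-lun-" + lun
	fmt.Println("/dev/disk/by-path/pci-0000:41:00.0" + suffix)
	// Output: /dev/disk/by-path/pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0
}

The same name appears in the fakeIOHandler of fc_util_test.go below.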
91
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util_test.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"os"
	"testing"
	"time"
)

type fakeFileInfo struct {
	name string
}

func (fi *fakeFileInfo) Name() string {
	return fi.name
}

func (fi *fakeFileInfo) Size() int64 {
	return 0
}

func (fi *fakeFileInfo) Mode() os.FileMode {
	return 777
}

func (fi *fakeFileInfo) ModTime() time.Time {
	return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
	return false
}

func (fi *fakeFileInfo) Sys() interface{} {
	return nil
}

type fakeIOHandler struct{}

func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	switch dirname {
	case "/dev/disk/by-path/":
		f := &fakeFileInfo{
			name: "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0",
		}
		return []os.FileInfo{f}, nil
	case "/sys/block/":
		f := &fakeFileInfo{
			name: "dm-1",
		}
		return []os.FileInfo{f}, nil
	}
	return nil, nil
}

func (handler *fakeIOHandler) Lstat(name string) (os.FileInfo, error) {
	return nil, nil
}

func (handler *fakeIOHandler) EvalSymlinks(path string) (string, error) {
	return "/dev/sda", nil
}

func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return nil
}

func TestIoHandler(t *testing.T) {
	io := &fakeIOHandler{}
	wwns := []string{"500a0981891b8dc5"}
	lun := "0"
	disk, dm := searchDisk(wwns, lun, io)
	// if no disk matches input wwn and lun, exit
	if disk == "" && dm == "" {
		t.Errorf("no fc disk found")
	}
}
45
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "flexvolume.go",
        "flexvolume_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["flexvolume_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,2 @@
maintainers:
- nelcy
432
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume.go
generated
vendored
Normal file
@@ -0,0 +1,432 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strings"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
	plugins := []volume.VolumePlugin{}

	files, _ := ioutil.ReadDir(pluginDir)
	for _, f := range files {
		// only directories are counted as plugins
		// and pluginDir/dirname/dirname should be an executable
		// unless dirname contains '~' for escaping the namespace
		// e.g. dirname = vendor~cifs
		// then the executable will be pluginDir/dirname/cifs
		if f.IsDir() {
			execPath := path.Join(pluginDir, f.Name())
			plugins = append(plugins, &flexVolumePlugin{driverName: utilstrings.UnescapePluginName(f.Name()), execPath: execPath})
		}
	}
	return plugins
}

// flexVolumePlugin object.
type flexVolumePlugin struct {
	driverName string
	execPath   string
	host       volume.VolumeHost
}

// Init initializes the plugin.
func (plugin *flexVolumePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	// call the init script
	u := &flexVolumeUtil{}
	return u.init(plugin)
}

func (plugin *flexVolumePlugin) getExecutable() string {
	parts := strings.Split(plugin.driverName, "/")
	execName := parts[len(parts)-1]
	return path.Join(plugin.execPath, execName)
}

func (plugin *flexVolumePlugin) GetPluginName() string {
	return plugin.driverName
}

func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.Driver, nil
}

// CanSupport checks whether the plugin can support the input volume spec.
func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool {
	source, _, _ := getVolumeSource(spec)
	return (source != nil) && (source.Driver == plugin.driverName)
}

func (plugin *flexVolumePlugin) RequiresRemount() bool {
	return false
}

// GetAccessModes gets the allowed access modes for this plugin.
func (plugin *flexVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
	}
}

// NewMounter is the mounter routine to build the volume.
func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	fv, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	secrets := make(map[string]string)
	if fv.SecretRef != nil {
		kubeClient := plugin.host.GetKubeClient()
		if kubeClient == nil {
			return nil, fmt.Errorf("Cannot get kube client")
		}

		secret, err := kubeClient.Core().Secrets(pod.Namespace).Get(fv.SecretRef.Name)
		if err != nil {
			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, fv.SecretRef, err)
			return nil, err
		}
		for name, data := range secret.Data {
			secrets[name] = base64.StdEncoding.EncodeToString(data)
			glog.V(1).Infof("found flex volume secret info: %s", name)
		}
	}
	return plugin.newMounterInternal(spec, pod, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New(), secrets)
}

// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
	source, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	return &flexVolumeMounter{
		flexVolumeDisk: &flexVolumeDisk{
			podUID:       pod.UID,
			podNamespace: pod.Namespace,
			podName:      pod.Name,
			volName:      spec.Name(),
			driverName:   source.Driver,
			execPath:     plugin.getExecutable(),
			mounter:      mounter,
			plugin:       plugin,
			secrets:      secrets,
		},
		fsType:             source.FSType,
		readOnly:           source.ReadOnly,
		options:            source.Options,
		runner:             runner,
		manager:            manager,
		blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: runner},
	}, nil
}

// NewUnmounter is the unmounter routine to clean the volume.
func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New())
}

// newUnmounterInternal is the internal unmounter routine to clean the volume.
func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface) (volume.Unmounter, error) {
	return &flexVolumeUnmounter{
		flexVolumeDisk: &flexVolumeDisk{
			podUID:     podUID,
			volName:    volName,
			driverName: plugin.driverName,
			execPath:   plugin.getExecutable(),
			mounter:    mounter,
			plugin:     plugin,
		},
		runner:  runner,
		manager: manager,
	}, nil
}

func (plugin *flexVolumePlugin) ConstructVolumeSpec(volumeName, sourceName string) (*volume.Spec, error) {
	flexVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			FlexVolume: &v1.FlexVolumeSource{
				Driver: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(flexVolume), nil
}

// flexVolumeDisk is the disk resource provided by this plugin.
type flexVolumeDisk struct {
	// podUID is the UID of the pod.
	podUID types.UID
	// podNamespace is the namespace of the pod.
	podNamespace string
	// podName is the name of the pod.
	podName string
	// volName is the name of the pod volume.
	volName string
	// driverName is the name of the plugin driver.
	driverName string
	// Driver executable used to setup the volume.
	execPath string
	// mounter provides the interface that is used to mount the actual
	// block device.
	mounter mount.Interface
	// secret for the volume.
	secrets map[string]string
	plugin  *flexVolumePlugin
}

// flexVolumeUnmounter is the disk that will be cleaned by this plugin.
type flexVolumeUnmounter struct {
	*flexVolumeDisk
	// Runner used to teardown the volume.
	runner exec.Interface
	// manager is the utility interface that provides API calls to the
	// driver to setup & teardown disks
	manager flexVolumeManager
	volume.MetricsNil
}

// flexVolumeMounter is the disk that will be exposed by this plugin.
type flexVolumeMounter struct {
	*flexVolumeDisk
	// fsType is the type of the filesystem to create on the volume.
	fsType string
	// readOnly specifies whether the disk will be setup as read-only.
	readOnly bool
	// options are the extra params that will be passed to the plugin
	// driver.
	options map[string]string
	// Runner used to setup the volume.
	runner exec.Interface
	// manager is the utility interface that provides API calls to the
	// driver to setup & teardown disks
	manager flexVolumeManager
	// blockDeviceMounter provides the interface to create a filesystem if
	// the filesystem doesn't exist.
	blockDeviceMounter mount.Interface
	volume.MetricsNil
}

// SetUp creates a new directory.
func (f *flexVolumeMounter) SetUp(fsGroup *int64) error {
	return f.SetUpAt(f.GetPath(), fsGroup)
}

// GetAttributes gets the flex volume attributes. The attributes will be
// queried using a plugin callout after we finalize the callout syntax.
func (f flexVolumeMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        f.readOnly,
		Managed:         false,
		SupportsSELinux: false,
	}
}

// CanMount checks prior to mount operations that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (f *flexVolumeMounter) CanMount() error {
	return nil
}

// flexVolumeManager is the abstract interface to flex volume ops.
type flexVolumeManager interface {
	// Attaches the disk to the kubelet's host machine.
	attach(mounter *flexVolumeMounter) (string, error)
	// Detaches the disk from the kubelet's host machine.
	detach(unmounter *flexVolumeUnmounter, dir string) error
	// Mounts the disk on the kubelet's host machine.
	mount(mounter *flexVolumeMounter, mnt, dir string) error
	// Unmounts the disk from the kubelet's host machine.
	unmount(unmounter *flexVolumeUnmounter, dir string) error
}

// SetUpAt creates a new directory.
func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {

	notmnt, err := f.blockDeviceMounter.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("Cannot validate mount point: %s %v", dir, err)
		return err
	}
	if !notmnt {
		return nil
	}

	if f.options == nil {
		f.options = make(map[string]string)
	}

	f.options[optionFSType] = f.fsType

	// Read write mount options.
	if f.readOnly {
		f.options[optionReadWrite] = "ro"
	} else {
		f.options[optionReadWrite] = "rw"
	}

	// Extract secret and pass it as options.
	for name, secret := range f.secrets {
		f.options[optionKeySecret+"/"+name] = secret
	}

	glog.V(4).Infof("attempting to attach volume: %s with options %v", f.volName, f.options)
	device, err := f.manager.attach(f)
	if err != nil {
		if !isCmdNotSupportedErr(err) {
			glog.Errorf("failed to attach volume: %s", f.volName)
			return err
		}
		// Attach not supported or required. Continue to mount.
	}

	glog.V(4).Infof("attempting to mount volume: %s", f.volName)
	if err := f.manager.mount(f, device, dir); err != nil {
		if !isCmdNotSupportedErr(err) {
			glog.Errorf("failed to mount volume: %s", f.volName)
			return err
		}
		options := make([]string, 0)

		if f.readOnly {
			options = append(options, "ro")
		} else {
			options = append(options, "rw")
		}
		// Extract secret and pass it as options.
		for name, secret := range f.secrets {
			f.options[optionKeySecret+"/"+name] = secret
		}

		os.MkdirAll(dir, 0750)
		// Mount not supported by driver. Use core mounting logic.
		glog.V(4).Infof("attempting to mount the volume: %s to device: %s", f.volName, device)
		err = f.blockDeviceMounter.Mount(string(device), dir, f.fsType, options)
		if err != nil {
			glog.Errorf("failed to mount the volume: %s to device: %s, error: %v", f.volName, device, err)
			return err
		}
	}

	glog.V(4).Infof("Successfully mounted volume: %s on device: %s", f.volName, device)
	return nil
}

// IsReadOnly returns true if the volume is read only.
func (f *flexVolumeMounter) IsReadOnly() bool {
	return f.readOnly
}

// GetPath gets the actual volume mount directory based on the plugin.
func (f *flexVolumeDisk) GetPath() string {
	name := f.driverName
	return f.plugin.host.GetPodVolumeDir(f.podUID, utilstrings.EscapeQualifiedNameForDisk(name), f.volName)
}

// TearDown simply deletes everything in the directory.
func (f *flexVolumeUnmounter) TearDown() error {
	path := f.GetPath()
	return f.TearDownAt(path)
}

// TearDownAt simply deletes everything in the directory.
func (f *flexVolumeUnmounter) TearDownAt(dir string) error {

	notmnt, err := f.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking mount point %s, error: %v", dir, err)
		return err
	}
	if notmnt {
		return os.Remove(dir)
	}

	device, refCount, err := mount.GetDeviceNameFromMount(f.mounter, dir)
	if err != nil {
		glog.Errorf("Failed to get reference count for volume: %s", dir)
		return err
	}

	if err := f.manager.unmount(f, dir); err != nil {
		if !isCmdNotSupportedErr(err) {
			glog.Errorf("Failed to unmount volume %s", f.volName)
			return err
		}
		// Unmount not supported by the driver. Use core unmount logic.
		if err := f.mounter.Unmount(dir); err != nil {
			glog.Errorf("Failed to unmount volume: %s, error: %v", dir, err)
			return err
		}
	}

	if refCount == 1 {
		if err := f.manager.detach(f, device); err != nil {
			if !isCmdNotSupportedErr(err) {
				glog.Errorf("Failed to teardown volume: %s, error: %v", dir, err)
				return err
			}
			// Teardown not supported by driver. Unmount is good enough.
		}
	}

	notmnt, err = f.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking mount point %s, error: %v", dir, err)
		return err
	}
	if notmnt {
		return os.Remove(dir)
	}

	return nil
}

func getVolumeSource(spec *volume.Spec) (*v1.FlexVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		return spec.Volume.FlexVolume, spec.Volume.FlexVolume.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.FlexVolume != nil {
		return spec.PersistentVolume.Spec.FlexVolume, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference a Flex volume type")
}
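ProbeVolumePlugins above derives the driver name from the directory name, unescaping '~' back into a namespace separator, and getExecutable takes the last path segment as the binary name. A runnable sketch of that mapping, assuming UnescapePluginName simply replaces '~' with '/' (the helper below is hypothetical; the real one lives in pkg/util/strings):

package main

import (
	"fmt"
	"strings"
)

// unescapePluginName mirrors what utilstrings.UnescapePluginName is assumed
// to do for this layout: map the '~' used in directory names back to '/'.
func unescapePluginName(name string) string {
	return strings.Replace(name, "~", "/", -1)
}

func main() {
	dir := "vendor~cifs" // one directory per driver under pluginDir
	driver := unescapePluginName(dir)
	parts := strings.Split(driver, "/")
	fmt.Println(driver)                          // driver name: vendor/cifs
	fmt.Println(dir + "/" + parts[len(parts)-1]) // executable: vendor~cifs/cifs
}

The tests below exercise exactly this layout by installing fake driver scripts under a temp plugin directory.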
417
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_test.go
generated
vendored
Normal file
@@ -0,0 +1,417 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"os"
	"path"
	"testing"
	"text/template"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const execScriptTempl1 = `#!/bin/bash
if [ "$1" == "init" -a $# -eq 1 ]; then
  echo -n '{
    "status": "Success"
  }'
  exit 0
fi

PATH=$2
if [ "$1" == "attach" -a $# -eq 2 ]; then
  echo -n '{
    "device": "{{.DevicePath}}",
    "status": "Success"
  }'
  exit 0
elif [ "$1" == "detach" -a $# -eq 2 ]; then
  echo -n '{
    "status": "Success"
  }'
  exit 0
elif [ "$1" == "mount" -a $# -eq 4 ]; then
  echo -n '{
    "status": "Not supported"
  }'
  exit 0
elif [ "$1" == "unmount" -a $# -eq 2 ]; then
  echo -n '{
    "status": "Not supported"
  }'
  exit 0
fi

echo -n '{
  "status": "Failure",
  "reason": "Invalid usage"
}'
exit 1

# Direct the arguments to a file to be tested against later
echo -n $@ &> {{.OutputFile}}
`

const execScriptTempl2 = `#!/bin/bash
if [ "$1" == "init" -a $# -eq 1 ]; then
  echo -n '{
    "status": "Success"
  }'
  exit 0
fi

if [ "$1" == "attach" -a $# -eq 2 ]; then
  echo -n '{
    "status": "Not supported"
  }'
  exit 0
elif [ "$1" == "detach" -a $# -eq 2 ]; then
  echo -n '{
    "status": "Not supported"
  }'
  exit 0
elif [ "$1" == "mount" -a $# -eq 4 ]; then
  PATH=$2
  /bin/mkdir -p $PATH
  if [ $? -ne 0 ]; then
    echo -n '{
      "status": "Failure",
      "reason": "Failed to create $PATH"
    }'
    exit 1
  fi
  echo -n '{
    "status": "Success"
  }'
  exit 0
elif [ "$1" == "unmount" -a $# -eq 2 ]; then
  PATH=$2
  /bin/rm -r $PATH
  if [ $? -ne 0 ]; then
    echo -n '{
      "status": "Failure",
      "reason": "Failed to cleanup $PATH"
    }'
    exit 1
  fi
  echo -n '{
    "status": "Success"
  }'
  exit 0
fi

echo -n '{
  "status": "Failure",
  "reason": "Invalid usage"
}'
exit 1

# Direct the arguments to a file to be tested against later
echo -n $@ &> {{.OutputFile}}
`

func installPluginUnderTest(t *testing.T, vendorName, plugName, tmpDir string, execScriptTempl string, execTemplateData *map[string]interface{}) {
	vendoredName := plugName
	if vendorName != "" {
		vendoredName = fmt.Sprintf("%s~%s", vendorName, plugName)
	}
	pluginDir := path.Join(tmpDir, vendoredName)
	err := os.MkdirAll(pluginDir, 0777)
	if err != nil {
		t.Errorf("Failed to create plugin: %v", err)
	}
	pluginExec := path.Join(pluginDir, plugName)
	f, err := os.Create(pluginExec)
	if err != nil {
		t.Errorf("Failed to install plugin")
	}
	err = f.Chmod(0777)
	if err != nil {
		t.Errorf("Failed to set exec perms on plugin")
	}
	if execTemplateData == nil {
		execTemplateData = &map[string]interface{}{
			"DevicePath": "/dev/sdx",
			"OutputFile": path.Join(pluginDir, plugName+".out"),
		}
	}

	tObj := template.Must(template.New("test").Parse(execScriptTempl))
	buf := &bytes.Buffer{}
	if err := tObj.Execute(buf, *execTemplateData); err != nil {
		t.Errorf("Error in executing script template - %v", err)
	}
	execScript := buf.String()
	_, err = f.WriteString(execScript)
	if err != nil {
		t.Errorf("Failed to write plugin exec")
	}
	f.Close()
}

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil))
	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeAttacher")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plugin.GetPluginName() != "kubernetes.io/fakeAttacher" {
		t.Errorf("Wrong name: %s", plugin.GetPluginName())
	}
	if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
		t.Errorf("Expected true")
	}
	if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
		t.Errorf("Expected true")
	}
	if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plugin, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fakeAttacher")
	if err != nil {
		t.Fatalf("Can't find the plugin by name")
	}
	if !contains(plugin.GetAccessModes(), v1.ReadWriteOnce) || !contains(plugin.GetAccessModes(), v1.ReadOnlyMany) {
		t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
	}
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeAttacher")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	secretMap := make(map[string]string)
	secretMap["flexsecret"] = base64.StdEncoding.EncodeToString([]byte("foo"))
	mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), secretMap)
	volumePath := mounter.GetPath()
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeAttacher/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	t.Logf("Setup successful")
	if mounter.(*flexVolumeMounter).readOnly {
		t.Errorf("The volume source should not be read-only and it is.")
	}

	if len(fake.Log) != 1 {
		t.Errorf("Mount was not called exactly one time. It was called %d times.", len(fake.Log))
	} else {
		if fake.Log[0].Action != mount.FakeActionMount {
			t.Errorf("Unexpected mounter action: %#v", fake.Log[0])
		}
	}
	fake.ResetLog()

	unmounter, err := plugin.(*flexVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
	if len(fake.Log) != 1 {
		t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(fake.Log))
	} else {
		if fake.Log[0].Action != mount.FakeActionUnmount {
			t.Errorf("Unexpected mounter action: %#v", fake.Log[0])
		}
	}

	fake.ResetLog()
}

func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeMounter", tmpDir, execScriptTempl2, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeMounter")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	// Use nil secret to test for nil secret case.
	mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), nil)
	volumePath := mounter.GetPath()
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeMounter/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	t.Logf("Setup successful")
	if mounter.(*flexVolumeMounter).readOnly {
		t.Errorf("The volume source should not be read-only and it is.")
	}

	unmounter, err := plugin.(*flexVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
}

func TestPluginVolumeAttacher(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false}},
	}
	doTestPluginAttachDetach(t, volume.NewSpecFromVolume(vol), tmpDir)
}

func TestPluginVolumeMounter(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeMounter", ReadOnly: false}},
	}
	doTestPluginMountUnmount(t, volume.NewSpecFromVolume(vol), tmpDir)
}

func TestPluginPersistentVolume(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	vol := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false},
			},
		},
	}

	doTestPluginAttachDetach(t, volume.NewSpecFromPersistentVolume(vol, false), tmpDir)
}
221
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_util.go
generated
vendored
Normal file
221
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_util.go
generated
vendored
Normal file
|
|
@ -0,0 +1,221 @@
|
|||
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/util/exec"
)

const (
	initCmd    = "init"
	attachCmd  = "attach"
	detachCmd  = "detach"
	mountCmd   = "mount"
	unmountCmd = "unmount"

	optionFSType    = "kubernetes.io/fsType"
	optionReadWrite = "kubernetes.io/readwrite"
	optionKeySecret = "kubernetes.io/secret"
)

const (
	// StatusSuccess represents the successful completion of a command.
	StatusSuccess = "Success"
	// StatusFailure represents a command that failed.
	StatusFailure = "Failed"
	// StatusNotSupported represents a command that is not supported by the driver.
	StatusNotSupported = "Not supported"
)

// FlexVolumeDriverStatus represents the return value of the driver callout.
type FlexVolumeDriverStatus struct {
	// Status of the callout. One of "Success", "Failed" or "Not supported".
	Status string
	// Message is the reason for failure.
	Message string
	// Device assigned by the driver.
	Device string `json:"device"`
}

// flexVolumeUtil is the utility structure to setup and teardown devices from
// the host.
type flexVolumeUtil struct{}

// isCmdNotSupportedErr checks if the error corresponds to a command not
// supported by the driver.
func isCmdNotSupportedErr(err error) bool {
	return err.Error() == StatusNotSupported
}

// handleCmdResponse processes the command output and returns the appropriate
// error code or message.
func handleCmdResponse(cmd string, output []byte) (*FlexVolumeDriverStatus, error) {
	var status FlexVolumeDriverStatus
	if err := json.Unmarshal(output, &status); err != nil {
		glog.Errorf("Failed to unmarshal output for command: %s, output: %s, error: %s", cmd, output, err.Error())
		return nil, err
	} else if status.Status == StatusNotSupported {
		glog.V(5).Infof("%s command is not supported by the driver", cmd)
		return nil, errors.New(status.Status)
	} else if status.Status != StatusSuccess {
		errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
		glog.Error(errMsg)
		return nil, fmt.Errorf("%s", errMsg)
	}

	return &status, nil
}

// init initializes the plugin by invoking the driver's "init" callout.
func (u *flexVolumeUtil) init(plugin *flexVolumePlugin) error {
	// call the init script
	output, err := exec.New().Command(plugin.getExecutable(), initCmd).CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to init driver: %s, error: %s", plugin.driverName, err.Error())
		_, err := handleCmdResponse(initCmd, output)
		return err
	}

	glog.V(5).Infof("Successfully initialized driver %s", plugin.driverName)
	return nil
}

// attach exposes a volume on the host.
func (u *flexVolumeUtil) attach(f *flexVolumeMounter) (string, error) {
	execPath := f.execPath

	var options string
	if f.options != nil {
		out, err := json.Marshal(f.options)
		if err != nil {
			glog.Errorf("Failed to marshal plugin options, error: %s", err.Error())
			return "", err
		}
		if len(out) != 0 {
			options = string(out)
		} else {
			options = ""
		}
	}

	cmd := f.runner.Command(execPath, attachCmd, options)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to attach volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(attachCmd, output)
		return "", err
	}

	status, err := handleCmdResponse(attachCmd, output)
	if err != nil {
		return "", err
	}

	glog.Infof("Successfully attached volume %s on device: %s", f.volName, status.Device)

	return status.Device, nil
}

// detach detaches a volume from the host.
func (u *flexVolumeUtil) detach(f *flexVolumeUnmounter, mntDevice string) error {
	execPath := f.execPath

	// Executable provider command.
	cmd := f.runner.Command(execPath, detachCmd, mntDevice)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to detach volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(detachCmd, output)
		return err
	}

	_, err = handleCmdResponse(detachCmd, output)
	if err != nil {
		return err
	}

	glog.Infof("Successfully detached volume %s on device: %s", f.volName, mntDevice)
	return nil
}

// mount mounts the volume on the host.
func (u *flexVolumeUtil) mount(f *flexVolumeMounter, mntDevice, dir string) error {
	execPath := f.execPath

	var options string
	if f.options != nil {
		out, err := json.Marshal(f.options)
		if err != nil {
			glog.Errorf("Failed to marshal plugin options, error: %s", err.Error())
			return err
		}
		if len(out) != 0 {
			options = string(out)
		} else {
			options = ""
		}
	}

	// Executable provider command.
	cmd := f.runner.Command(execPath, mountCmd, dir, mntDevice, options)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to mount volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(mountCmd, output)
		return err
	}

	_, err = handleCmdResponse(mountCmd, output)
	if err != nil {
		return err
	}

	glog.Infof("Successfully mounted volume %s on dir: %s", f.volName, dir)
	return nil
}

// unmount unmounts the volume on the host.
func (u *flexVolumeUtil) unmount(f *flexVolumeUnmounter, dir string) error {
	execPath := f.execPath

	// Executable provider command.
	cmd := f.runner.Command(execPath, unmountCmd, dir)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to unmount volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(unmountCmd, output)
		return err
	}

	_, err = handleCmdResponse(unmountCmd, output)
	if err != nil {
		return err
	}

	glog.Infof("Successfully unmounted volume %s on dir: %s", f.volName, dir)
	return nil
}
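handleCmdResponse above fixes the whole kubelet/driver contract: each callout prints a single JSON object whose Status field selects success, failure, or "Not supported". A self-contained sketch of parsing such a reply; the sample payload, struct tags, and field names are illustrative assumptions, not taken from a real driver:

package main

import (
	"encoding/json"
	"fmt"
)

// driverStatus mirrors the fields handleCmdResponse unmarshals from a
// driver callout's stdout.
type driverStatus struct {
	Status  string `json:"status"`
	Message string `json:"message"`
	Device  string `json:"device"`
}

func main() {
	// Illustrative "attach" reply; a real driver would print this to stdout.
	reply := []byte(`{"status": "Success", "device": "/dev/sdb"}`)

	var st driverStatus
	if err := json.Unmarshal(reply, &st); err != nil {
		fmt.Println("bad reply:", err)
		return
	}
	switch st.Status {
	case "Success":
		fmt.Println("attached at", st.Device)
	case "Not supported":
		fmt.Println("driver does not implement this call")
	default:
		fmt.Println("driver failed:", st.Message)
	}
}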
56
vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD
generated
vendored
Normal file
@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "flocker.go",
        "flocker_util.go",
        "flocker_volume.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/env:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/rand:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/clusterhq/flocker-go",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "flocker_test.go",
        "flocker_util_test.go",
        "flocker_volume_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/meta/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor:github.com/clusterhq/flocker-go",
        "//vendor:github.com/stretchr/testify/assert",
    ],
)
3
vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS
generated
vendored
Normal file
@ -0,0 +1,3 @@
maintainers:
- agonzalezro
- simonswine
18
vendor/k8s.io/kubernetes/pkg/volume/flocker/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package flocker contains the internal representation of Flocker volumes.
package flocker // import "k8s.io/kubernetes/pkg/volume/flocker"
474
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go
generated
vendored
Normal file
@ -0,0 +1,474 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"
	"os"
	"path"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/env"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"

	flockerapi "github.com/clusterhq/flocker-go"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&flockerPlugin{nil}}
}

type flockerPlugin struct {
	host volume.VolumeHost
}

type flockerVolume struct {
	volName string
	podUID  types.UID
	// dataset metadata name (deprecated)
	datasetName string
	// dataset UUID
	datasetUUID string
	//pod           *v1.Pod
	flockerClient flockerapi.Clientable
	manager       volumeManager
	plugin        *flockerPlugin
	mounter       mount.Interface
	volume.MetricsProvider
}

var _ volume.VolumePlugin = &flockerPlugin{}
var _ volume.PersistentVolumePlugin = &flockerPlugin{}
var _ volume.DeletableVolumePlugin = &flockerPlugin{}
var _ volume.ProvisionableVolumePlugin = &flockerPlugin{}

const (
	flockerPluginName = "kubernetes.io/flocker"

	defaultHost           = "localhost"
	defaultPort           = 4523
	defaultCACertFile     = "/etc/flocker/cluster.crt"
	defaultClientKeyFile  = "/etc/flocker/apiuser.key"
	defaultClientCertFile = "/etc/flocker/apiuser.crt"
	defaultMountPath      = "/flocker"

	timeoutWaitingForVolume = 2 * time.Minute
	tickerWaitingForVolume  = 5 * time.Second
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(flockerPluginName), volName)
}

func makeGlobalFlockerPath(datasetUUID string) string {
	return path.Join(defaultMountPath, datasetUUID)
}

func (p *flockerPlugin) Init(host volume.VolumeHost) error {
	p.host = host
	return nil
}

func (p *flockerPlugin) GetPluginName() string {
	return flockerPluginName
}

func (p *flockerPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.DatasetName, nil
}

func (p *flockerPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker != nil) ||
		(spec.Volume != nil && spec.Volume.Flocker != nil)
}

func (p *flockerPlugin) RequiresRemount() bool {
	return false
}

func (plugin *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool) {
	// AFAIK this will always be r/w, but perhaps for the future it will be needed
	readOnly := false

	if spec.Volume != nil && spec.Volume.Flocker != nil {
		return spec.Volume.Flocker, readOnly
	}
	return spec.PersistentVolume.Spec.Flocker, readOnly
}

func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter())
}

func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) {
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	datasetName := volumeSource.DatasetName
	datasetUUID := volumeSource.DatasetUUID

	return &flockerVolumeMounter{
		flockerVolume: &flockerVolume{
			podUID:          podUID,
			volName:         spec.Name(),
			datasetName:     datasetName,
			datasetUUID:     datasetUUID,
			mounter:         mounter,
			manager:         manager,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		readOnly: readOnly}, nil
}

func (p *flockerPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return p.newUnmounterInternal(volName, podUID, &FlockerUtil{}, p.host.GetMounter())
}

func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &flockerVolumeUnmounter{&flockerVolume{
		podUID:          podUID,
		volName:         volName,
		manager:         manager,
		mounter:         mounter,
		plugin:          p,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, p.host)),
	}}, nil
}

func (p *flockerPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	flockerVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			Flocker: &v1.FlockerVolumeSource{
				DatasetName: volumeName,
			},
		},
	}
	return volume.NewSpecFromVolume(flockerVolume), nil
}

func (b *flockerVolume) GetDatasetUUID() (datasetUUID string, err error) {

	// return the UUID if it is set
	if len(b.datasetUUID) > 0 {
		return b.datasetUUID, nil
	}

	if b.flockerClient == nil {
		return "", fmt.Errorf("Flocker client is not initialized")
	}

	// otherwise look it up in the Flocker API
	return b.flockerClient.GetDatasetID(b.datasetName)
}

type flockerVolumeMounter struct {
	*flockerVolume
	readOnly bool
}

func (b *flockerVolumeMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         false,
		SupportsSELinux: false,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If they are not, it returns an error.
func (b *flockerVolumeMounter) CanMount() error {
	return nil
}

func (b *flockerVolumeMounter) GetPath() string {
	return getPath(b.podUID, b.volName, b.plugin.host)
}

// SetUp bind mounts the disk global mount to the volume path.
func (b *flockerVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

// newFlockerClient uses environment variables and pod attributes to return a
// flocker client capable of talking with the Flocker control service.
func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerapi.Client, error) {
	host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
	port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort)

	if err != nil {
		return nil, err
	}
	caCertPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CA_FILE", defaultCACertFile)
	keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
	certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)

	c, err := flockerapi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath)
	return c, err
}

func (b *flockerVolumeMounter) newFlockerClient() (*flockerapi.Client, error) {

	hostIP, err := b.plugin.host.GetHostIP()
	if err != nil {
		return nil, err
	}

	return b.plugin.newFlockerClient(hostIP.String())
}

/*
SetUpAt will set up a Flocker volume following this flow of calls to the
Flocker control service:

 1. Get the dataset ID for the given volume name/dir.
 2. The dataset should already exist; if it does not, the user needs to
    create it manually first.
 3. Check the node's current Primary UUID.
 4. If the dataset's primary doesn't match the Primary UUID from step 3,
    update the Primary UUID for this volume.
 5. Wait until the Primary UUID has been updated, or time out.
*/
func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	var err error
	if b.flockerClient == nil {
		b.flockerClient, err = b.newFlockerClient()
		if err != nil {
			return err
		}
	}

	datasetUUID, err := b.GetDatasetUUID()
	if err != nil {
		return fmt.Errorf("The datasetUUID for volume with datasetName='%s' cannot be found using Flocker: %s", b.datasetName, err)
	}

	datasetState, err := b.flockerClient.GetDatasetState(datasetUUID)
	if err != nil {
		return fmt.Errorf("The datasetState for volume with datasetUUID='%s' could not be determined using Flocker: %s", datasetUUID, err)
	}

	primaryUUID, err := b.flockerClient.GetPrimaryUUID()
	if err != nil {
		return err
	}

	if datasetState.Primary != primaryUUID {
		if err := b.updateDatasetPrimary(datasetUUID, primaryUUID); err != nil {
			return err
		}
		_, err := b.flockerClient.GetDatasetState(datasetUUID)
		if err != nil {
			return fmt.Errorf("The volume with datasetUUID='%s' was not migrated successfully: %v", datasetUUID, err)
		}
	}

	// TODO: handle failed mounts here.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mount point: %s %v", dir, err)
		return err
	}
	if !notMnt {
		return nil
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
		return err
	}

	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}

	globalFlockerPath := makeGlobalFlockerPath(datasetUUID)
	glog.V(4).Infof("attempting to mount %s", dir)

	err = b.mounter.Mount(globalFlockerPath, dir, "", options)
	if err != nil {
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("failed to unmount: %v", mntErr)
				return err
			}
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("mount of disk %s failed: %v", dir, err)
		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(b, fsGroup)
	}

	glog.V(4).Infof("successfully mounted %s", dir)
	return nil
}

// updateDatasetPrimary will update the primary in Flocker and wait for it to
// be ready. If it never reaches the ready state, it times out and errors.
func (b *flockerVolumeMounter) updateDatasetPrimary(datasetUUID string, primaryUUID string) error {
	// We need to update the primary and wait for it to be ready.
	_, err := b.flockerClient.UpdatePrimaryForDataset(primaryUUID, datasetUUID)
	if err != nil {
		return err
	}

	timeoutChan := time.NewTimer(timeoutWaitingForVolume)
	defer timeoutChan.Stop()
	tickChan := time.NewTicker(tickerWaitingForVolume)
	defer tickChan.Stop()

	for {
		if s, err := b.flockerClient.GetDatasetState(datasetUUID); err == nil && s.Primary == primaryUUID {
			return nil
		}

		select {
		case <-timeoutChan.C:
			return fmt.Errorf(
				"Timed out waiting for the datasetUUID: '%s' to be moved to the primary: '%s'\n%v",
				datasetUUID, primaryUUID, err,
			)
		case <-tickChan.C:
		}
	}
}

func getVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.Flocker != nil {
		return spec.Volume.Flocker, spec.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.Flocker != nil {
		return spec.PersistentVolume.Spec.Flocker, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference a Flocker volume type")
}

type flockerVolumeUnmounter struct {
	*flockerVolume
}

var _ volume.Unmounter = &flockerVolumeUnmounter{}

func (c *flockerVolumeUnmounter) GetPath() string {
	return getPath(c.podUID, c.volName, c.plugin.host)
}

// TearDown unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *flockerVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

// TearDownAt unmounts the bind mount.
func (c *flockerVolumeUnmounter) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	if notMnt {
		return os.Remove(dir)
	}
	if err := c.mounter.Unmount(dir); err != nil {
		return err
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr)
		return err
	}
	if notMnt {
		return os.Remove(dir)
	}
	return fmt.Errorf("Failed to unmount volume dir")
}

func (plugin *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return plugin.newDeleterInternal(spec, &FlockerUtil{})
}

func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.Flocker is nil")
	}
	return &flockerVolumeDeleter{
		flockerVolume: &flockerVolume{
			volName:     spec.Name(),
			datasetName: spec.PersistentVolume.Spec.Flocker.DatasetName,
			datasetUUID: spec.PersistentVolume.Spec.Flocker.DatasetUUID,
			manager:     manager,
		}}, nil
}

func (plugin *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	return plugin.newProvisionerInternal(options, &FlockerUtil{})
}

func (plugin *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) {
	return &flockerVolumeProvisioner{
		flockerVolume: &flockerVolume{
			manager: manager,
			plugin:  plugin,
		},
		options: options,
	}, nil
}
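updateDatasetPrimary above pairs a time.Timer with a time.Ticker to poll the control service until the dataset reports the new primary or the two-minute deadline expires. The same wait-loop shape, reduced to a self-contained sketch with the Flocker call swapped for a hypothetical condition function:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond every tick until it returns true, giving up after
// timeout. This mirrors the timer/ticker structure of updateDatasetPrimary.
func waitFor(cond func() bool, tick, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	ticker := time.NewTicker(tick)
	defer ticker.Stop()

	for {
		if cond() {
			return nil
		}
		select {
		case <-timer.C:
			return errors.New("timed out waiting for condition")
		case <-ticker.C:
			// fall through and re-check the condition
		}
	}
}

func main() {
	start := time.Now()
	// Hypothetical condition: "primary updated" after roughly 30ms.
	err := waitFor(func() bool { return time.Since(start) > 30*time.Millisecond },
		10*time.Millisecond, time.Second)
	fmt.Println("result:", err)
}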
359
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_test.go
generated
vendored
Normal file
@ -0,0 +1,359 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"
	"os"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"

	flockerapi "github.com/clusterhq/flocker-go"
	"github.com/stretchr/testify/assert"
)

const pluginName = "kubernetes.io/flocker"
const datasetOneID = "11111111-1111-1111-1111-111111111100"
const nodeOneID = "11111111-1111-1111-1111-111111111111"
const nodeTwoID = "22222222-2222-2222-2222-222222222222"

var _ flockerapi.Clientable = &fakeFlockerClient{}

type fakeFlockerClient struct {
	DatasetID string
	Primary   string
	Deleted   bool
	Metadata  map[string]string
	Nodes     []flockerapi.NodeState
	Error     error
}

func newFakeFlockerClient() *fakeFlockerClient {
	return &fakeFlockerClient{
		DatasetID: datasetOneID,
		Primary:   nodeOneID,
		Deleted:   false,
		Metadata:  map[string]string{"Name": "dataset-one"},
		Nodes: []flockerapi.NodeState{
			{
				Host: "1.2.3.4",
				UUID: nodeOneID,
			},
			{
				Host: "4.5.6.7",
				UUID: nodeTwoID,
			},
		},
	}
}

func (c *fakeFlockerClient) CreateDataset(options *flockerapi.CreateDatasetOptions) (*flockerapi.DatasetState, error) {

	if c.Error != nil {
		return nil, c.Error
	}

	return &flockerapi.DatasetState{
		DatasetID: c.DatasetID,
	}, nil
}

func (c *fakeFlockerClient) DeleteDataset(datasetID string) error {
	c.DatasetID = datasetID
	c.Deleted = true
	return nil
}

func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
	return &flockerapi.DatasetState{}, nil
}

func (c *fakeFlockerClient) GetDatasetID(metaName string) (datasetID string, err error) {
	if val, ok := c.Metadata["Name"]; ok {
		return val, nil
	}
	return "", fmt.Errorf("No dataset with metadata X found")
}

func (c *fakeFlockerClient) GetPrimaryUUID() (primaryUUID string, err error) {
	return
}

func (c *fakeFlockerClient) ListNodes() (nodes []flockerapi.NodeState, err error) {
	return c.Nodes, nil
}

func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
	return &flockerapi.DatasetState{}, nil
}

type fakeFlockerUtil struct {
}

func (fake *fakeFlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {
	labels = make(map[string]string)
	labels["fakeflockerutil"] = "yes"
	return "test-flocker-volume-uuid", 3, labels, nil
}

func (fake *fakeFlockerUtil) DeleteVolume(cd *flockerVolumeDeleter) error {
	if cd.datasetUUID != "test-flocker-volume-uuid" {
		return fmt.Errorf("Deleter got unexpected datasetUUID: %s", cd.datasetUUID)
	}
	return nil
}

func newInitializedVolumePlugMgr(t *testing.T) (*volume.VolumePluginMgr, string) {
	plugMgr := &volume.VolumePluginMgr{}
	dir, err := utiltesting.MkTmpdir("flocker")
	assert.NoError(t, err)
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(dir, nil, nil))
	return plugMgr, dir
}

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("flockerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/flocker")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			Flocker: &v1.FlockerVolumeSource{
				DatasetUUID: "uuid1",
			},
		},
	}
	fakeManager := &fakeFlockerUtil{}
	fakeMounter := &mount.FakeMounter{}
	mounter, err := plug.(*flockerPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
}

func TestGetByName(t *testing.T) {
	assert := assert.New(t)
	plugMgr, _ := newInitializedVolumePlugMgr(t)

	plug, err := plugMgr.FindPluginByName(pluginName)
	assert.NotNil(plug, "Can't find the plugin by name")
	assert.NoError(err)
}

func TestCanSupport(t *testing.T) {
	assert := assert.New(t)
	plugMgr, _ := newInitializedVolumePlugMgr(t)

	plug, err := plugMgr.FindPluginByName(pluginName)
	assert.NoError(err)

	specs := map[*volume.Spec]bool{
		&volume.Spec{
			Volume: &v1.Volume{
				VolumeSource: v1.VolumeSource{
					Flocker: &v1.FlockerVolumeSource{},
				},
			},
		}: true,
		&volume.Spec{
			PersistentVolume: &v1.PersistentVolume{
				Spec: v1.PersistentVolumeSpec{
					PersistentVolumeSource: v1.PersistentVolumeSource{
						Flocker: &v1.FlockerVolumeSource{},
					},
				},
			},
		}: true,
		&volume.Spec{
			Volume: &v1.Volume{
				VolumeSource: v1.VolumeSource{},
			},
		}: false,
	}

	for spec, expected := range specs {
		actual := plug.CanSupport(spec)
		assert.Equal(expected, actual)
	}
}

func TestGetFlockerVolumeSource(t *testing.T) {
	assert := assert.New(t)

	p := flockerPlugin{}

	spec := &volume.Spec{
		Volume: &v1.Volume{
			VolumeSource: v1.VolumeSource{
				Flocker: &v1.FlockerVolumeSource{},
			},
		},
	}
	vs, ro := p.getFlockerVolumeSource(spec)
	assert.False(ro)
	assert.Equal(spec.Volume.Flocker, vs)

	spec = &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					Flocker: &v1.FlockerVolumeSource{},
				},
			},
		},
	}
	vs, ro = p.getFlockerVolumeSource(spec)
	assert.False(ro)
	assert.Equal(spec.PersistentVolume.Spec.Flocker, vs)
}

func TestNewMounterDatasetName(t *testing.T) {
	assert := assert.New(t)

	plugMgr, _ := newInitializedVolumePlugMgr(t)
	plug, err := plugMgr.FindPluginByName(pluginName)
	assert.NoError(err)

	spec := &volume.Spec{
		Volume: &v1.Volume{
			VolumeSource: v1.VolumeSource{
				Flocker: &v1.FlockerVolumeSource{
					DatasetName: "something",
				},
			},
		},
	}

	_, err = plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
	assert.NoError(err)
}

func TestNewMounterDatasetUUID(t *testing.T) {
	assert := assert.New(t)

	plugMgr, _ := newInitializedVolumePlugMgr(t)
	plug, err := plugMgr.FindPluginByName(pluginName)
	assert.NoError(err)

	spec := &volume.Spec{
		Volume: &v1.Volume{
			VolumeSource: v1.VolumeSource{
				Flocker: &v1.FlockerVolumeSource{
					DatasetUUID: "uuid1",
				},
			},
		},
	}

	mounter, err := plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
	assert.NoError(err)
	assert.NotNil(mounter, "got a nil mounter")
}

func TestNewUnmounter(t *testing.T) {
	t.Skip("broken")
	assert := assert.New(t)

	p := flockerPlugin{}

	unmounter, err := p.NewUnmounter("", types.UID(""))
	assert.Nil(unmounter)
	assert.NoError(err)
}

func TestIsReadOnly(t *testing.T) {
	b := &flockerVolumeMounter{readOnly: true}
	assert.True(t, b.GetAttributes().ReadOnly)
}

type mockFlockerClient struct {
	datasetID, primaryUUID, path string
	datasetState                 *flockerapi.DatasetState
}

func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient {
	return &mockFlockerClient{
		datasetID:   mockDatasetID,
		primaryUUID: mockPrimaryUUID,
		path:        mockPath,
		datasetState: &flockerapi.DatasetState{
			Path:      mockPath,
			DatasetID: mockDatasetID,
			Primary:   mockPrimaryUUID,
		},
	}
}

func (m mockFlockerClient) CreateDataset(metaName string) (*flockerapi.DatasetState, error) {
	return m.datasetState, nil
}

func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
	return m.datasetState, nil
}

func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
	return m.datasetID, nil
}

func (m mockFlockerClient) GetPrimaryUUID() (string, error) {
	return m.primaryUUID, nil
}

func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
	return m.datasetState, nil
}

/*
TODO: reenable after refactor
func TestSetUpAtInternal(t *testing.T) {
	const dir = "dir"
	mockPath := "expected-to-be-set-properly" // package var
	expectedPath := mockPath

	assert := assert.New(t)

	plugMgr, rootDir := newInitializedVolumePlugMgr(t)
	if rootDir != "" {
		defer os.RemoveAll(rootDir)
	}
	plug, err := plugMgr.FindPluginByName(flockerPluginName)
	assert.NoError(err)

	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	b := flockerVolumeMounter{flockerVolume: &flockerVolume{pod: pod, plugin: plug.(*flockerPlugin)}}
	b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath)

	assert.NoError(b.SetUpAt(dir, nil))
	assert.Equal(expectedPath, b.flocker.path)
}
*/
96
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/rand"
	"k8s.io/kubernetes/pkg/volume"

	flockerapi "github.com/clusterhq/flocker-go"
	"github.com/golang/glog"
)

type FlockerUtil struct{}

func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
	var err error

	if d.flockerClient == nil {
		d.flockerClient, err = d.plugin.newFlockerClient("")
		if err != nil {
			return err
		}
	}

	datasetUUID, err := d.GetDatasetUUID()
	if err != nil {
		return err
	}

	return d.flockerClient.DeleteDataset(datasetUUID)
}

func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {

	if c.flockerClient == nil {
		c.flockerClient, err = c.plugin.newFlockerClient("")
		if err != nil {
			return
		}
	}

	nodes, err := c.flockerClient.ListNodes()
	if err != nil {
		return
	}
	if len(nodes) < 1 {
		err = fmt.Errorf("no nodes found inside the flocker cluster to provision a dataset")
		return
	}

	// select a random node
	rand.Seed(time.Now().UTC().UnixNano())
	node := nodes[rand.Intn(len(nodes))]
	glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

	createOptions := &flockerapi.CreateDatasetOptions{
		MaximumSize: requestBytes,
		Metadata: map[string]string{
			"type": "k8s-dynamic-prov",
			"pvc":  c.options.PVC.Name,
		},
		Primary: node.UUID,
	}

	datasetState, err := c.flockerClient.CreateDataset(createOptions)
	if err != nil {
		return
	}
	datasetUUID = datasetState.DatasetID

	glog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID)

	return
}
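CreateVolume above converts the PVC's byte request into whole GiB with volume.RoundUpSize. Assuming the usual ceiling-division formula (a sketch, not the vendored implementation), the arithmetic looks like this:

package main

import "fmt"

// roundUpSize rounds volumeSizeBytes up to a multiple of allocationUnitBytes
// and returns the number of units: ceil(volumeSizeBytes / allocationUnitBytes).
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024
	// A PVC asking for "3Gi" requests exactly 3 GiB...
	fmt.Println(roundUpSize(3*GiB, GiB)) // 3
	// ...while any partial unit is rounded up to the next whole GiB.
	fmt.Println(roundUpSize(3*GiB+1, GiB)) // 4
}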
55
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util_test.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"

	"github.com/stretchr/testify/assert"
)

func TestFlockerUtil_CreateVolume(t *testing.T) {
	assert := assert.New(t)

	// test CreateVolume happy path
	pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
	options := volume.VolumeOptions{
		PVC: pvc,
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}

	fakeFlockerClient := newFakeFlockerClient()
	provisioner := newTestableProvisioner(assert, options).(*flockerVolumeProvisioner)
	provisioner.flockerClient = fakeFlockerClient

	flockerUtil := &FlockerUtil{}

	datasetID, size, _, err := flockerUtil.CreateVolume(provisioner)
	assert.NoError(err)
	assert.Equal(datasetOneID, datasetID)
	assert.Equal(3, size)

	// test error during CreateVolume
	fakeFlockerClient.Error = fmt.Errorf("Do not feel like provisioning")
	_, _, _, err = flockerUtil.CreateVolume(provisioner)
	assert.Equal(fakeFlockerClient.Error.Error(), err.Error())
}
105
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/volume"
)

type volumeManager interface {
	// CreateVolume creates a volume.
	CreateVolume(provisioner *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error)
	// DeleteVolume deletes a volume.
	DeleteVolume(deleter *flockerVolumeDeleter) error
}

type flockerVolumeDeleter struct {
	*flockerVolume
}

var _ volume.Deleter = &flockerVolumeDeleter{}

func (b *flockerVolumeDeleter) GetPath() string {
	return getPath(b.podUID, b.volName, b.plugin.host)
}

func (d *flockerVolumeDeleter) Delete() error {
	return d.manager.DeleteVolume(d)
}

type flockerVolumeProvisioner struct {
	*flockerVolume
	options volume.VolumeOptions
}

var _ volume.Provisioner = &flockerVolumeProvisioner{}

func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {

	if len(c.options.Parameters) > 0 {
		return nil, fmt.Errorf("Provisioning failed: Specified at least one unsupported parameter")
	}

	if c.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("Provisioning failed: Specified unsupported selector")
	}

	datasetUUID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "flocker-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Flocker: &v1.FlockerVolumeSource{
					DatasetUUID: datasetUUID,
				},
			},
		},
	}
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}

	return pv, nil
}
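The volumeManager interface above is the seam that lets the tests swap the real FlockerUtil for fakeFlockerUtil: the exported constructors inject the real implementation, while the *Internal constructors accept any volumeManager. A stripped-down sketch of that injection pattern, with invented names purely for illustration:

package main

import "fmt"

// manager is the seam: production code and tests supply different implementations.
type manager interface {
	CreateVolume(name string) (uuid string, err error)
}

type realManager struct{} // would call the external control service

func (realManager) CreateVolume(name string) (string, error) {
	return "", fmt.Errorf("no control service in this sketch")
}

type fakeManager struct{} // deterministic stand-in for tests

func (fakeManager) CreateVolume(name string) (string, error) {
	return "test-volume-uuid", nil
}

// provision is the "internal" function: it only ever sees the interface.
func provision(m manager, name string) (string, error) {
	return m.CreateVolume(name)
}

func main() {
	// Production wires in the real implementation...
	if _, err := provision(realManager{}, "vol1"); err != nil {
		fmt.Println("real:", err)
	}
	// ...while tests inject the fake and get deterministic results.
	uuid, _ := provision(fakeManager{}, "vol1")
	fmt.Println("fake:", uuid)
}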
99
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume_test.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flocker

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"

	"github.com/stretchr/testify/assert"
)

func newTestableProvisioner(assert *assert.Assertions, options volume.VolumeOptions) volume.Provisioner {
	tmpDir, err := utiltesting.MkTmpdir("flockervolumeTest")
	assert.NoError(err, fmt.Sprintf("can't make a temp dir: %v", err))

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName(pluginName)
	assert.NoError(err, "Can't find the plugin by name")

	provisioner, err := plug.(*flockerPlugin).newProvisionerInternal(options, &fakeFlockerUtil{})
	assert.NoError(err, "Can't create a new provisioner")

	return provisioner
}

func TestProvision(t *testing.T) {
	assert := assert.New(t)

	pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
	options := volume.VolumeOptions{
		PVC: pvc,
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}

	provisioner := newTestableProvisioner(assert, options)

	persistentSpec, err := provisioner.Provision()
	assert.NoError(err, "Provision() failed: ", err)

	cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]

	assert.Equal(int64(3*1024*1024*1024), cap.Value())

	assert.Equal(
		"test-flocker-volume-uuid",
		persistentSpec.Spec.PersistentVolumeSource.Flocker.DatasetUUID,
	)

	assert.Equal(
		map[string]string{"fakeflockerutil": "yes"},
		persistentSpec.Labels,
	)

	// parameters are not supported
	options = volume.VolumeOptions{
		PVC: pvc,
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
		Parameters: map[string]string{
			"not-supported-params": "test123",
		},
	}

	provisioner = newTestableProvisioner(assert, options)
	persistentSpec, err = provisioner.Provision()
	assert.Error(err, "Provision() did not fail with Parameters specified")

	// selectors are not supported
	pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
	options = volume.VolumeOptions{
		PVC: pvc,
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}

	provisioner = newTestableProvisioner(assert, options)
	persistentSpec, err = provisioner.Provision()
	assert.Error(err, "Provision() did not fail with Selector specified")
}
56
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD
generated
vendored
Normal file
@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "doc.go",
        "gce_pd.go",
        "gce_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/sets:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "gce_pd_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)
3
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS
generated
vendored
Normal file
@ -0,0 +1,3 @@
maintainers:
- saad-ali
- thockin
275
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go
generated
vendored
Normal file
@ -0,0 +1,275 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "fmt"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/exec"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/sets"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type gcePersistentDiskAttacher struct {
    host     volume.VolumeHost
    gceDisks gce.Disks
}

var _ volume.Attacher = &gcePersistentDiskAttacher{}

var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{}

func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
    gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        return nil, err
    }

    return &gcePersistentDiskAttacher{
        host:     plugin.host,
        gceDisks: gceCloud,
    }, nil
}

func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
    mounter := plugin.host.GetMounter()
    return mount.GetMountRefs(mounter, deviceMountPath)
}

// Attach checks with the GCE cloud provider if the specified volume is already
// attached to the node with the specified name.
// If the volume is attached, it succeeds (returns nil).
// If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    pdName := volumeSource.PDName

    attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
            pdName, nodeName, err)
    }

    if err == nil && attached {
        // Volume is already attached to node.
        glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
    } else {
        if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
            glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
            return "", err
        }
    }

    return path.Join(diskByIdPath, diskGooglePrefix+pdName), nil
}

func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
    volumesAttachedCheck := make(map[*volume.Spec]bool)
    volumePdNameMap := make(map[string]*volume.Spec)
    pdNameList := []string{}
    for _, spec := range specs {
        volumeSource, _, err := getVolumeSource(spec)
        // If an error occurs, skip this volume and move on to the next one
        if err != nil {
            glog.Errorf("Error getting volume (%q) source: %v", spec.Name(), err)
            continue
        }
        pdNameList = append(pdNameList, volumeSource.PDName)
        volumesAttachedCheck[spec] = true
        volumePdNameMap[volumeSource.PDName] = spec
    }
    attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "Error checking if PDs (%v) are already attached to current node (%q). err=%v",
            pdNameList, nodeName, err)
        return volumesAttachedCheck, err
    }

    for pdName, attached := range attachedResult {
        if !attached {
            spec := volumePdNameMap[pdName]
            volumesAttachedCheck[spec] = false
            glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name())
        }
    }
    return volumesAttachedCheck, nil
}

func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
    ticker := time.NewTicker(checkSleepDuration)
    defer ticker.Stop()
    timer := time.NewTimer(timeout)
    defer timer.Stop()

    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    pdName := volumeSource.PDName
    partition := ""
    if volumeSource.Partition != 0 {
        partition = strconv.Itoa(int(volumeSource.Partition))
    }

    sdBefore, err := filepath.Glob(diskSDPattern)
    if err != nil {
        glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
    }
    sdBeforeSet := sets.NewString(sdBefore...)

    devicePaths := getDiskByIdPaths(pdName, partition)
    for {
        select {
        case <-ticker.C:
            glog.V(5).Infof("Checking GCE PD %q is attached.", pdName)
            path, err := verifyDevicePath(devicePaths, sdBeforeSet)
            if err != nil {
                // Log error, if any, and continue checking periodically. See issue #11321
                glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err)
            } else if path != "" {
                // A device path has successfully been created for the PD
                glog.Infof("Successfully found attached GCE PD %q.", pdName)
                return path, nil
            }
        case <-timer.C:
            return "", fmt.Errorf("Could not find attached GCE PD %q. Timeout waiting for mount paths to be created.", pdName)
        }
    }
}

func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(
    spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return makeGlobalPDName(attacher.host, volumeSource.PDName), nil
}

func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
    // Only mount the PD globally once.
    mounter := attacher.host.GetMounter()
    notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
    if err != nil {
        if os.IsNotExist(err) {
            if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
                return err
            }
            notMnt = true
        } else {
            return err
        }
    }

    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return err
    }

    options := []string{}
    if readOnly {
        options = append(options, "ro")
    }
    if notMnt {
        diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
        err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
        if err != nil {
            os.Remove(deviceMountPath)
            return err
        }
        glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
    }
    return nil
}

type gcePersistentDiskDetacher struct {
    host     volume.VolumeHost
    gceDisks gce.Disks
}

var _ volume.Detacher = &gcePersistentDiskDetacher{}

func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
    gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
    if err != nil {
        return nil, err
    }

    return &gcePersistentDiskDetacher{
        host:     plugin.host,
        gceDisks: gceCloud,
    }, nil
}

// Detach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is not attached, it succeeds
// (returns nil). If it is attached, Detach issues a call to the GCE cloud
// provider to detach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and detach
// operations.
func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
    pdName := path.Base(deviceMountPath)

    attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName)
    if err != nil {
        // Log error and continue with detach
        glog.Errorf(
            "Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
            pdName, nodeName, err)
    }

    if err == nil && !attached {
        // Volume is not attached to node. Success!
        glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName)
        return nil
    }

    if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil {
        glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err)
        return err
    }

    return nil
}

func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
    return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter())
}
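Taken together, the methods above form a fixed sequence: the attach/detach controller calls Attach, and the kubelet then polls WaitForAttach before mounting the device once per node. A minimal sketch of a caller driving that sequence (illustration only, not part of the vendored file; the plugin, spec, and nodeName values, the helper name, and the 10-minute timeout are assumptions of the sketch):

package gce_pd

import (
    "time"

    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
)

// attachAndMountSketch is a hypothetical helper showing the order of calls;
// the real callers live in the attach/detach controller and the kubelet.
func attachAndMountSketch(plugin volume.AttachableVolumePlugin, spec *volume.Spec, nodeName types.NodeName) error {
    attacher, err := plugin.NewAttacher()
    if err != nil {
        return err
    }
    // Attach is idempotent: it returns the /dev/disk/by-id path whether or
    // not the disk was already attached.
    devicePath, err := attacher.Attach(spec, nodeName)
    if err != nil {
        return err
    }
    // Poll until udev creates the device node; the timeout here is an
    // arbitrary value chosen for the sketch.
    devicePath, err = attacher.WaitForAttach(spec, devicePath, 10*time.Minute)
    if err != nil {
        return err
    }
    deviceMountPath, err := attacher.GetDeviceMountPath(spec)
    if err != nil {
        return err
    }
    // MountDevice formats the disk on first use and mounts it once per node;
    // each pod then bind-mounts from deviceMountPath via the mounter's SetUp.
    return attacher.MountDevice(spec, devicePath, deviceMountPath)
}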
369
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go
generated
vendored
Normal file
@@ -0,0 +1,369 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "errors"
    "fmt"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/types"
)

func TestGetDeviceName_Volume(t *testing.T) {
    plugin := newPlugin()
    name := "my-pd-volume"
    spec := createVolSpec(name, false)

    deviceName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetDeviceName error: %v", err)
    }
    if deviceName != name {
        t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
    }
}

func TestGetDeviceName_PersistentVolume(t *testing.T) {
    plugin := newPlugin()
    name := "my-pd-pv"
    spec := createPVSpec(name, true)

    deviceName, err := plugin.GetVolumeName(spec)
    if err != nil {
        t.Errorf("GetDeviceName error: %v", err)
    }
    if deviceName != name {
        t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
    }
}

// One testcase for TestAttachDetach table test below
type testcase struct {
    name string
    // For fake GCE:
    attach         attachCall
    detach         detachCall
    diskIsAttached diskIsAttachedCall
    t              *testing.T

    // Actual test to run
    test func(test *testcase) error
    // Expected return of the test
    expectedReturn error
}

func TestAttachDetach(t *testing.T) {
    diskName := "disk"
    nodeName := types.NodeName("instance")
    readOnly := false
    spec := createVolSpec(diskName, readOnly)
    attachError := errors.New("Fake attach error")
    detachError := errors.New("Fake detach error")
    diskCheckError := errors.New("Fake DiskIsAttached error")
    tests := []testcase{
        // Successful Attach call
        {
            name:           "Attach_Positive",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
            attach:         attachCall{diskName, nodeName, readOnly, nil},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
                if devicePath != "/dev/disk/by-id/google-disk" {
                    return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
                }
                return err
            },
        },

        // Disk is already attached
        {
            name:           "Attach_Positive_AlreadyAttached",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
                if devicePath != "/dev/disk/by-id/google-disk" {
                    return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
                }
                return err
            },
        },

        // DiskIsAttached fails and Attach succeeds
        {
            name:           "Attach_Positive_CheckFails",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            attach:         attachCall{diskName, nodeName, readOnly, nil},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
                if devicePath != "/dev/disk/by-id/google-disk" {
                    return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
                }
                return err
            },
        },

        // Attach call fails
        {
            name:           "Attach_Negative",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            attach:         attachCall{diskName, nodeName, readOnly, attachError},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
                if devicePath != "" {
                    return fmt.Errorf("devicePath incorrect. Expected<\"\"> Actual: <%q>", devicePath)
                }
                return err
            },
            expectedReturn: attachError,
        },

        // Detach succeeds
        {
            name:           "Detach_Positive",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
            detach:         detachCall{diskName, nodeName, nil},
            test: func(testcase *testcase) error {
                detacher := newDetacher(testcase)
                return detacher.Detach(diskName, nodeName)
            },
        },

        // Disk is already detached
        {
            name:           "Detach_Positive_AlreadyDetached",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
            test: func(testcase *testcase) error {
                detacher := newDetacher(testcase)
                return detacher.Detach(diskName, nodeName)
            },
        },

        // Detach succeeds when DiskIsAttached fails
        {
            name:           "Detach_Positive_CheckFails",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            detach:         detachCall{diskName, nodeName, nil},
            test: func(testcase *testcase) error {
                detacher := newDetacher(testcase)
                return detacher.Detach(diskName, nodeName)
            },
        },

        // Detach fails
        {
            name:           "Detach_Negative",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            detach:         detachCall{diskName, nodeName, detachError},
            test: func(testcase *testcase) error {
                detacher := newDetacher(testcase)
                return detacher.Detach(diskName, nodeName)
            },
            expectedReturn: detachError,
        },
    }

    for _, testcase := range tests {
        testcase.t = t
        err := testcase.test(&testcase)
        if err != testcase.expectedReturn {
            t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error())
        }
        t.Logf("Test %q succeeded", testcase.name)
    }
}

// newPlugin creates a new gcePersistentDiskPlugin with a fake cloud;
// NewAttacher and NewDetacher won't work.
func newPlugin() *gcePersistentDiskPlugin {
    host := volumetest.NewFakeVolumeHost(
        "/tmp", /* rootDir */
        nil,    /* kubeClient */
        nil /* plugins */)
    plugins := ProbeVolumePlugins()
    plugin := plugins[0]
    plugin.Init(host)
    return plugin.(*gcePersistentDiskPlugin)
}

func newAttacher(testcase *testcase) *gcePersistentDiskAttacher {
    return &gcePersistentDiskAttacher{
        host:     nil,
        gceDisks: testcase,
    }
}

func newDetacher(testcase *testcase) *gcePersistentDiskDetacher {
    return &gcePersistentDiskDetacher{
        gceDisks: testcase,
    }
}

func createVolSpec(name string, readOnly bool) *volume.Spec {
    return &volume.Spec{
        Volume: &v1.Volume{
            VolumeSource: v1.VolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                    PDName:   name,
                    ReadOnly: readOnly,
                },
            },
        },
    }
}

func createPVSpec(name string, readOnly bool) *volume.Spec {
    return &volume.Spec{
        PersistentVolume: &v1.PersistentVolume{
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                        PDName:   name,
                        ReadOnly: readOnly,
                    },
                },
            },
        },
    }
}

// Fake GCE implementation

type attachCall struct {
    diskName string
    nodeName types.NodeName
    readOnly bool
    ret      error
}

type detachCall struct {
    devicePath string
    nodeName   types.NodeName
    ret        error
}

type diskIsAttachedCall struct {
    diskName   string
    nodeName   types.NodeName
    isAttached bool
    ret        error
}

func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
    expected := &testcase.attach

    if expected.diskName == "" && expected.nodeName == "" {
        // testcase.attach looks uninitialized, test did not expect to call
        // AttachDisk
        testcase.t.Errorf("Unexpected AttachDisk call!")
        return errors.New("Unexpected AttachDisk call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
        return errors.New("Unexpected AttachDisk call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return errors.New("Unexpected AttachDisk call: wrong nodeName")
    }

    if expected.readOnly != readOnly {
        testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
        return errors.New("Unexpected AttachDisk call: wrong readOnly")
    }

    glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)

    return expected.ret
}

func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) error {
    expected := &testcase.detach

    if expected.devicePath == "" && expected.nodeName == "" {
        // testcase.detach looks uninitialized, test did not expect to call
        // DetachDisk
        testcase.t.Errorf("Unexpected DetachDisk call!")
        return errors.New("Unexpected DetachDisk call!")
    }

    if expected.devicePath != devicePath {
        testcase.t.Errorf("Unexpected DetachDisk call: expected devicePath %s, got %s", expected.devicePath, devicePath)
        return errors.New("Unexpected DetachDisk call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return errors.New("Unexpected DetachDisk call: wrong nodeName")
    }

    glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret)

    return expected.ret
}

func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
    expected := &testcase.diskIsAttached

    if expected.diskName == "" && expected.nodeName == "" {
        // testcase.diskIsAttached looks uninitialized, test did not expect to
        // call DiskIsAttached
        testcase.t.Errorf("Unexpected DiskIsAttached call!")
        return false, errors.New("Unexpected DiskIsAttached call!")
    }

    if expected.diskName != diskName {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
        return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
    }

    if expected.nodeName != nodeName {
        testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
        return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
    }

    glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)

    return expected.isAttached, expected.ret
}

func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
    return nil, errors.New("Not implemented")
}

func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
    return errors.New("Not implemented")
}

func (testcase *testcase) DeleteDisk(diskToDelete string) error {
    return errors.New("Not implemented")
}

func (testcase *testcase) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
    return map[string]string{}, errors.New("Not implemented")
}
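The fake above enforces expectations with a simple convention: each *Call struct records the single call the test allows, and a zero-value struct means that method must not be called at all. The same pattern in isolation, as a runnable sketch (the fakeDisks and expectation types are hypothetical, not from the file):

package main

import (
    "errors"
    "fmt"
)

// expectation records the one call a test allows; the zero value means
// "must not be called" -- the same convention the gce_pd fakes use.
type expectation struct {
    disk string
    node string
    ret  error
}

type fakeDisks struct{ attach expectation }

func (f *fakeDisks) AttachDisk(disk, node string) error {
    if f.attach.disk == "" && f.attach.node == "" {
        return errors.New("unexpected AttachDisk call")
    }
    if f.attach.disk != disk || f.attach.node != node {
        return fmt.Errorf("unexpected AttachDisk args: %s/%s", disk, node)
    }
    return f.attach.ret
}

func main() {
    f := &fakeDisks{attach: expectation{disk: "pd", node: "node-1"}}
    fmt.Println(f.AttachDisk("pd", "node-1")) // <nil>
    fmt.Println(f.AttachDisk("pd", "node-2")) // unexpected AttachDisk args: pd/node-2
}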
19
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package gce_pd contains the internal representation of GCE PersistentDisk
// volumes.
package gce_pd // import "k8s.io/kubernetes/pkg/volume/gce_pd"
427
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go
generated
vendored
Normal file
@@ -0,0 +1,427 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "fmt"
    "os"
    "path"
    "strconv"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&gcePersistentDiskPlugin{nil}}
}

type gcePersistentDiskPlugin struct {
    host volume.VolumeHost
}

var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}

const (
    gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
    return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(gcePersistentDiskPluginName), volName)
}

func (plugin *gcePersistentDiskPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}

func (plugin *gcePersistentDiskPlugin) GetPluginName() string {
    return gcePersistentDiskPluginName
}

func (plugin *gcePersistentDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    return volumeSource.PDName, nil
}

func (plugin *gcePersistentDiskPlugin) CanSupport(spec *volume.Spec) bool {
    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk != nil) ||
        (spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil)
}

func (plugin *gcePersistentDiskPlugin) RequiresRemount() bool {
    return false
}

func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
    return []v1.PersistentVolumeAccessMode{
        v1.ReadWriteOnce,
        v1.ReadOnlyMany,
    }
}

func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    // Inject real implementations here, test through the internal function.
    return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
}

func getVolumeSource(
    spec *volume.Spec) (*v1.GCEPersistentDiskVolumeSource, bool, error) {
    if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
        return spec.Volume.GCEPersistentDisk, spec.Volume.GCEPersistentDisk.ReadOnly, nil
    } else if spec.PersistentVolume != nil &&
        spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
        return spec.PersistentVolume.Spec.GCEPersistentDisk, spec.ReadOnly, nil
    }

    return nil, false, fmt.Errorf("Spec does not reference a GCE volume type")
}

func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
    // GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
    // GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }

    pdName := volumeSource.PDName
    partition := ""
    if volumeSource.Partition != 0 {
        partition = strconv.Itoa(int(volumeSource.Partition))
    }

    return &gcePersistentDiskMounter{
        gcePersistentDisk: &gcePersistentDisk{
            podUID:          podUID,
            volName:         spec.Name(),
            pdName:          pdName,
            partition:       partition,
            mounter:         mounter,
            manager:         manager,
            plugin:          plugin,
            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
        },
        readOnly: readOnly}, nil
}

func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    // Inject real implementations here, test through the internal function.
    return plugin.newUnmounterInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter())
}

func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
    return &gcePersistentDiskUnmounter{&gcePersistentDisk{
        podUID:          podUID,
        volName:         volName,
        manager:         manager,
        mounter:         mounter,
        plugin:          plugin,
        MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
    }}, nil
}

func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
    return plugin.newDeleterInternal(spec, &GCEDiskUtil{})
}

func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk == nil {
        return nil, fmt.Errorf("spec.PersistentVolumeSource.GCEPersistentDisk is nil")
    }
    return &gcePersistentDiskDeleter{
        gcePersistentDisk: &gcePersistentDisk{
            volName: spec.Name(),
            pdName:  spec.PersistentVolume.Spec.GCEPersistentDisk.PDName,
            manager: manager,
            plugin:  plugin,
        }}, nil
}

func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
    return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
}

func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, manager pdManager) (volume.Provisioner, error) {
    return &gcePersistentDiskProvisioner{
        gcePersistentDisk: &gcePersistentDisk{
            manager: manager,
            plugin:  plugin,
        },
        options: options,
    }, nil
}

func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
    mounter := plugin.host.GetMounter()
    pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
    sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
    if err != nil {
        return nil, err
    }
    gceVolume := &v1.Volume{
        Name: volumeName,
        VolumeSource: v1.VolumeSource{
            GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                PDName: sourceName,
            },
        },
    }
    return volume.NewSpecFromVolume(gceVolume), nil
}

// Abstract interface to PD operations.
type pdManager interface {
    // Creates a volume
    CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error)
    // Deletes a volume
    DeleteVolume(deleter *gcePersistentDiskDeleter) error
}

// gcePersistentDisk volumes are disk resources provided by Google Compute Engine
// that are attached to the kubelet's host machine and exposed to the pod.
type gcePersistentDisk struct {
    volName string
    podUID  types.UID
    // Unique identifier of the PD, used to find the disk resource in the provider.
    pdName string
    // Specifies the partition to mount
    partition string
    // Utility interface to provision and delete disks
    manager pdManager
    // Mounter interface that provides system calls to mount the global path to the pod local path.
    mounter mount.Interface
    plugin  *gcePersistentDiskPlugin
    volume.MetricsProvider
}

type gcePersistentDiskMounter struct {
    *gcePersistentDisk
    // Specifies whether the disk will be mounted as read-only.
    readOnly bool
}

var _ volume.Mounter = &gcePersistentDiskMounter{}

func (b *gcePersistentDiskMounter) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        b.readOnly,
        Managed:         !b.readOnly,
        SupportsSELinux: true,
    }
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *gcePersistentDiskMounter) CanMount() error {
    return nil
}

// SetUp bind mounts the disk global mount to the volume path.
func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt bind mounts the disk global mount to the given volume path.
func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
    // TODO: handle failed mounts here.
    notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
    glog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly)
    if err != nil && !os.IsNotExist(err) {
        glog.Errorf("cannot validate mount point: %s %v", dir, err)
        return err
    }
    if !notMnt {
        return nil
    }

    if err := os.MkdirAll(dir, 0750); err != nil {
        glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
        return err
    }

    // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
    options := []string{"bind"}
    if b.readOnly {
        options = append(options, "ro")
    }

    globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
    glog.V(4).Infof("attempting to mount %s", dir)

    err = b.mounter.Mount(globalPDPath, dir, "", options)
    if err != nil {
        notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
        if mntErr != nil {
            glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
            return err
        }
        if !notMnt {
            if mntErr = b.mounter.Unmount(dir); mntErr != nil {
                glog.Errorf("Failed to unmount: %v", mntErr)
                return err
            }
            notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
            if mntErr != nil {
                glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
                return err
            }
            if !notMnt {
                // This is very odd, we don't expect it. We'll try again next sync loop.
                glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
                return err
            }
        }
        os.Remove(dir)
        glog.Errorf("Mount of disk %s failed: %v", dir, err)
        return err
    }

    if !b.readOnly {
        volume.SetVolumeOwnership(b, fsGroup)
    }

    glog.V(4).Infof("Successfully mounted %s", dir)
    return nil
}

func makeGlobalPDName(host volume.VolumeHost, devName string) string {
    return path.Join(host.GetPluginDir(gcePersistentDiskPluginName), mount.MountsInGlobalPDPath, devName)
}

func (b *gcePersistentDiskMounter) GetPath() string {
    return getPath(b.podUID, b.volName, b.plugin.host)
}

type gcePersistentDiskUnmounter struct {
    *gcePersistentDisk
}

var _ volume.Unmounter = &gcePersistentDiskUnmounter{}

func (c *gcePersistentDiskUnmounter) GetPath() string {
    return getPath(c.podUID, c.volName, c.plugin.host)
}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *gcePersistentDiskUnmounter) TearDown() error {
    return c.TearDownAt(c.GetPath())
}

// TearDownAt unmounts the bind mount
func (c *gcePersistentDiskUnmounter) TearDownAt(dir string) error {
    notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
    if err != nil {
        return err
    }
    if notMnt {
        return os.Remove(dir)
    }
    if err := c.mounter.Unmount(dir); err != nil {
        return err
    }
    notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
    if mntErr != nil {
        glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
        return err
    }
    if notMnt {
        return os.Remove(dir)
    }
    return fmt.Errorf("Failed to unmount volume dir")
}

type gcePersistentDiskDeleter struct {
    *gcePersistentDisk
}

var _ volume.Deleter = &gcePersistentDiskDeleter{}

func (d *gcePersistentDiskDeleter) GetPath() string {
    return getPath(d.podUID, d.volName, d.plugin.host)
}

func (d *gcePersistentDiskDeleter) Delete() error {
    return d.manager.DeleteVolume(d)
}

type gcePersistentDiskProvisioner struct {
    *gcePersistentDisk
    options volume.VolumeOptions
}

var _ volume.Provisioner = &gcePersistentDiskProvisioner{}

func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
    volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
    if err != nil {
        return nil, err
    }

    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name:   c.options.PVName,
            Labels: map[string]string{},
            Annotations: map[string]string{
                "kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
            },
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   c.options.PVC.Spec.AccessModes,
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
            },
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                    PDName:    volumeID,
                    Partition: 0,
                    ReadOnly:  false,
                },
            },
        },
    }
    if len(c.options.PVC.Spec.AccessModes) == 0 {
        pv.Spec.AccessModes = c.plugin.GetAccessModes()
    }

    if len(labels) != 0 {
        if pv.Labels == nil {
            pv.Labels = make(map[string]string)
        }
        for k, v := range labels {
            pv.Labels[k] = v
        }
    }

    return pv, nil
}
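One point worth noting in gce_pd.go: getVolumeSource reads the ReadOnly flag from two different places depending on the spec shape. A short sketch of both shapes (illustration only; exampleSpecShapes is a hypothetical function assuming the gce_pd package context):

package gce_pd

import (
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/volume"
)

// exampleSpecShapes is a hypothetical illustration, not part of the vendored file.
func exampleSpecShapes() {
    // 1) Direct pod volume: ReadOnly comes from the GCE source itself,
    //    set by the pod author.
    direct := volume.NewSpecFromVolume(&v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "pd", ReadOnly: true},
        },
    })
    _, readOnly, _ := getVolumeSource(direct) // readOnly == true

    // 2) PersistentVolume: ReadOnly comes from spec.ReadOnly, which the
    //    persistent-claim mounter sets (see TestPersistentClaimReadOnlyFlag).
    pv := &v1.PersistentVolume{
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "pd"},
            },
        },
    }
    pvSpec := volume.NewSpecFromPersistentVolume(pv, true /* readOnly */)
    _, readOnly, _ = getVolumeSource(pvSpec) // readOnly == true even though the source's own ReadOnly is false
    _ = readOnly
}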
257
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go
generated
vendored
Normal file
@@ -0,0 +1,257 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "fmt"
    "os"
    "path"
    "testing"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    utiltesting "k8s.io/kubernetes/pkg/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != "kubernetes.io/gce-pd" {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }
    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
}

func TestGetAccessModes(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/gce-pd")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
        t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
    }
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
    for _, m := range modes {
        if m == mode {
            return true
        }
    }
    return false
}

type fakePDManager struct {
}

func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error) {
    labels = make(map[string]string)
    labels["fakepdmanager"] = "yes"
    return "test-gce-volume-name", 100, labels, nil
}

func (fake *fakePDManager) DeleteVolume(cd *gcePersistentDiskDeleter) error {
    if cd.pdName != "test-gce-volume-name" {
        return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
    }
    return nil
}

func TestPlugin(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                PDName: "pd",
                FSType: "ext4",
            },
        },
    }
    fakeManager := &fakePDManager{}
    fakeMounter := &mount.FakeMounter{}
    mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~gce-pd/vol1")
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    fakeManager = &fakePDManager{}
    unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("TearDown() failed: %v", err)
    }

    // Test Provisioner
    options := volume.VolumeOptions{
        PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
    }
    provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
    if err != nil {
        t.Errorf("newProvisionerInternal() failed: %v", err)
    }
    persistentSpec, err := provisioner.Provision()
    if err != nil {
        t.Errorf("Provision() failed: %v", err)
    }

    if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" {
        t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
    }
    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
    size := cap.Value()
    if size != 100*1024*1024*1024 {
        t.Errorf("Provision() returned unexpected volume size: %v", size)
    }

    if persistentSpec.Labels["fakepdmanager"] != "yes" {
        t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
    }

    // Test Deleter
    volSpec := &volume.Spec{
        PersistentVolume: persistentSpec,
    }
    deleter, err := plug.(*gcePersistentDiskPlugin).newDeleterInternal(volSpec, &fakePDManager{})
    if err != nil {
        t.Errorf("newDeleterInternal() failed: %v", err)
    }
    err = deleter.Delete()
    if err != nil {
        t.Errorf("Deleter() failed: %v", err)
    }
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }

    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }

    client := fake.NewSimpleClientset(pv, claim)

    tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
    plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)

    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
250
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce_pd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
const (
|
||||
diskByIdPath = "/dev/disk/by-id/"
|
||||
diskGooglePrefix = "google-"
|
||||
diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_"
|
||||
diskPartitionSuffix = "-part"
|
||||
diskSDPath = "/dev/sd"
|
||||
diskSDPattern = "/dev/sd*"
|
||||
maxChecks = 60
|
||||
maxRetries = 10
|
||||
checkSleepDuration = time.Second
|
||||
)
|
||||
|
||||
// These variables are modified only in unit tests and should be constant
|
||||
// otherwise.
|
||||
var (
|
||||
errorSleepDuration time.Duration = 5 * time.Second
|
||||
)
|
||||
|
||||
type GCEDiskUtil struct{}
|
||||
|
||||
func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error {
|
||||
cloud, err := getCloudProvider(d.gcePersistentDisk.plugin.host.GetCloudProvider())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = cloud.DeleteDisk(d.pdName); err != nil {
|
||||
glog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err)
|
||||
// GCE cloud provider returns volume.deletedVolumeInUseError when
|
||||
// necessary, no handling needed here.
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateVolume creates a GCE PD.
|
||||
// Returns: volumeID, volumeSizeGB, labels, error
|
||||
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, error) {
|
||||
cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
|
||||
name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
|
||||
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
requestBytes := capacity.Value()
|
||||
// GCE works with gigabytes, convert to GiB with rounding up
|
||||
requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
|
||||
|
||||
// Apply Parameters (case-insensitive). We leave validation of
|
||||
// the values to the cloud provider.
|
||||
diskType := ""
|
||||
zone := ""
|
||||
for k, v := range c.options.Parameters {
|
||||
switch strings.ToLower(k) {
|
||||
case "type":
|
||||
diskType = v
|
||||
case "zone":
|
||||
zone = v
|
||||
default:
|
||||
return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: implement PVC.Selector parsing
|
||||
if c.options.PVC.Spec.Selector != nil {
|
||||
return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
|
||||
}
|
||||
|
||||
if zone == "" {
|
||||
// No zone specified, choose one randomly in the same region as the
|
||||
// node is running.
|
||||
zones, err := cloud.GetAllZones()
|
||||
if err != nil {
|
||||
glog.V(2).Infof("error getting zone information from GCE: %v", err)
|
||||
return "", 0, nil, err
|
||||
}
|
||||
zone = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
|
||||
}
|
||||
|
||||
err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Error creating GCE PD volume: %v", err)
|
||||
return "", 0, nil, err
|
||||
}
|
||||
glog.V(2).Infof("Successfully created GCE PD volume %s", name)
|
||||
|
||||
labels, err := cloud.GetAutoLabelsForPD(name, zone)
|
||||
if err != nil {
|
||||
// We don't really want to leak the volume here...
|
||||
glog.Errorf("error getting labels for volume %q: %v", name, err)
|
||||
}
|
||||
|
||||
return name, int(requestGB), labels, nil
|
||||
}
|
||||
|
||||
// Returns the first path that exists, or empty string if none exist.
|
||||
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
|
||||
if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
|
||||
// udevadm errors should not block disk detachment, log and continue
|
||||
glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
|
||||
}
|
||||
|
||||
for _, path := range devicePaths {
|
||||
if pathExists, err := volumeutil.PathExists(path); err != nil {
|
||||
return "", fmt.Errorf("Error checking if path exists: %v", err)
|
||||
} else if pathExists {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Returns the first path that exists, or empty string if none exist.
|
||||
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
|
||||
allPathsRemoved := true
|
||||
for _, path := range devicePaths {
|
||||
if err := udevadmChangeToDrive(path); err != nil {
|
||||
// udevadm errors should not block disk detachment, log and continue
|
||||
glog.Errorf("%v", err)
|
||||
}
|
||||
if exists, err := volumeutil.PathExists(path); err != nil {
|
||||
return false, fmt.Errorf("Error checking if path exists: %v", err)
|
||||
} else {
|
||||
allPathsRemoved = allPathsRemoved && !exists
|
||||
}
|
||||
}
|
||||
|
||||
return allPathsRemoved, nil
|
||||
}
|
||||
|
||||
// Returns list of all /dev/disk/by-id/* paths for given PD.
|
||||
func getDiskByIdPaths(pdName string, partition string) []string {
|
||||
devicePaths := []string{
|
||||
path.Join(diskByIdPath, diskGooglePrefix+pdName),
|
||||
path.Join(diskByIdPath, diskScsiGooglePrefix+pdName),
|
||||
}
|
||||
|
||||
if partition != "" {
|
||||
for i, path := range devicePaths {
|
||||
devicePaths[i] = path + diskPartitionSuffix + partition
|
||||
}
|
||||
}
|
||||
|
||||
return devicePaths
|
||||
}
|
||||
|
||||
// Return cloud provider
|
||||
func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.GCECloud, error) {
|
||||
var err error
|
||||
for numRetries := 0; numRetries < maxRetries; numRetries++ {
|
||||
gceCloudProvider, ok := cloudProvider.(*gcecloud.GCECloud)
|
||||
if !ok || gceCloudProvider == nil {
|
||||
// Retry on error. See issue #11321
|
||||
glog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider)
|
||||
time.Sleep(errorSleepDuration)
|
||||
continue
|
||||
}
|
||||
|
||||
return gceCloudProvider, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Failed to get GCE GCECloudProvider with error %v", err)
|
||||
}
|
||||
|
||||
// Triggers the application of udev rules by calling "udevadm trigger
|
||||
// --action=change" for newly created "/dev/sd*" drives (exist only in
|
||||
// after set). This is workaround for Issue #7972. Once the underlying
|
||||
// issue has been resolved, this may be removed.
|
||||
func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
|
||||
sdAfter, err := filepath.Glob(diskSDPattern)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
|
||||
}
|
||||
|
||||
for _, sd := range sdAfter {
|
||||
if !sdBeforeSet.Has(sd) {
|
||||
return udevadmChangeToDrive(sd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||

// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is a workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
	glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)

	// Evaluate symlink, if any
	drive, err := filepath.EvalSymlinks(drivePath)
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
	}
	glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)

	// Check to make sure input is "/dev/sd*"
	if !strings.Contains(drive, diskSDPath) {
		return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
	}

	// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
	_, err = exec.New().Command(
		"udevadm",
		"trigger",
		"--action=change",
		fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
	}
	return nil
}
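
To see how these helpers fit together, here is a minimal usage sketch. The attach-side caller lives elsewhere in this diff; the surrounding logic here is illustrative only.

	// Snapshot /dev/sd* before attaching, then nudge udev for anything new.
	sdBefore, err := filepath.Glob(diskSDPattern)
	if err != nil {
		glog.Errorf("Error filepath.Glob(%q): %v", diskSDPattern, err)
	}
	sdBeforeSet := sets.NewString(sdBefore...)
	// ... attach the disk via the cloud API ...
	if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
		// udevadm errors should not block attachment; log and continue.
		glog.Errorf("udevadmChangeToNewDrives failed: %v", err)
	}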
43
vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD
generated
vendored
Normal file
@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "git_repo.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["git_repo_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/git_repo/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,2 @@
maintainers:
- thockin
18
vendor/k8s.io/kubernetes/pkg/volume/git_repo/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package git_repo contains the internal representation of git repo volumes.
package git_repo // import "k8s.io/kubernetes/pkg/volume/git_repo"
276
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package git_repo

import (
	"fmt"
	"io/ioutil"
	"path"
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&gitRepoPlugin{nil}}
}
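
As a usage sketch (mirroring how the tests later in this diff wire things up), a plugin manager discovers this plugin roughly like so; volumeHost here stands in for whatever volume.VolumeHost the kubelet provides:

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumeHost)
	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		// the plugin was not registered
	}
	_ = plug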

type gitRepoPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &gitRepoPlugin{}

func wrappedVolumeSpec() volume.Spec {
	return volume.Spec{
		Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
	}
}

const (
	gitRepoPluginName = "kubernetes.io/git-repo"
)

func (plugin *gitRepoPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *gitRepoPlugin) GetPluginName() string {
	return gitRepoPluginName
}

func (plugin *gitRepoPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a Git repo volume type")
	}

	return fmt.Sprintf(
		"%v:%v:%v",
		volumeSource.Repository,
		volumeSource.Revision,
		volumeSource.Directory), nil
}

func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool {
	return spec.Volume != nil && spec.Volume.GitRepo != nil
}

func (plugin *gitRepoPlugin) RequiresRemount() bool {
	return false
}

func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return &gitRepoVolumeMounter{
		gitRepoVolume: &gitRepoVolume{
			volName: spec.Name(),
			podUID:  pod.UID,
			plugin:  plugin,
		},
		pod:      *pod,
		source:   spec.Volume.GitRepo.Repository,
		revision: spec.Volume.GitRepo.Revision,
		target:   spec.Volume.GitRepo.Directory,
		exec:     exec.New(),
		opts:     opts,
	}, nil
}

func (plugin *gitRepoPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &gitRepoVolumeUnmounter{
		&gitRepoVolume{
			volName: volName,
			podUID:  podUID,
			plugin:  plugin,
		},
	}, nil
}

func (plugin *gitRepoPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	gitVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			GitRepo: &v1.GitRepoVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(gitVolume), nil
}
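
For illustration, GetVolumeName above composes its result as "<repository>:<revision>:<directory>"; a spec with Repository "https://example.com/repo.git" (an illustrative URL), Revision "abc123", and Directory "app" yields:

	name, _ := plugin.GetVolumeName(spec)
	// name == "https://example.com/repo.git:abc123:app"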

// gitRepo volumes are directories which are pre-filled from a git repository.
// These do not persist beyond the lifetime of a pod.
type gitRepoVolume struct {
	volName string
	podUID  types.UID
	plugin  *gitRepoPlugin
	volume.MetricsNil
}

var _ volume.Volume = &gitRepoVolume{}

func (gr *gitRepoVolume) GetPath() string {
	name := gitRepoPluginName
	return gr.plugin.host.GetPodVolumeDir(gr.podUID, utilstrings.EscapeQualifiedNameForDisk(name), gr.volName)
}

// gitRepoVolumeMounter builds git repo volumes.
type gitRepoVolumeMounter struct {
	*gitRepoVolume

	pod      v1.Pod
	source   string
	revision string
	target   string
	exec     exec.Interface
	opts     volume.VolumeOptions
}

var _ volume.Mounter = &gitRepoVolumeMounter{}

func (b *gitRepoVolumeMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        false,
		Managed:         true,
		SupportsSELinux: true, // xattr change should be okay, TODO: double check
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) needed to mount the volume are available on the underlying
// node. If they are not, it returns an error.
func (b *gitRepoVolumeMounter) CanMount() error {
	return nil
}

// SetUp creates a new directory and clones a git repo into it.
func (b *gitRepoVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt creates a new directory and clones a git repo into it.
func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	if volumeutil.IsReady(b.getMetaDir()) {
		return nil
	}

	// Wrap EmptyDir, let it do the setup.
	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts)
	if err != nil {
		return err
	}
	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		return err
	}

	args := []string{"clone", b.source}

	if len(b.target) != 0 {
		args = append(args, b.target)
	}
	if output, err := b.execCommand("git", args, dir); err != nil {
		return fmt.Errorf("failed to exec 'git %s': %s: %v",
			strings.Join(args, " "), output, err)
	}

	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}

	if len(b.revision) == 0 {
		// Done!
		volumeutil.SetReady(b.getMetaDir())
		return nil
	}

	var subdir string

	switch {
	case b.target == ".":
		// if the target dir is '.', use the current dir
		subdir = path.Join(dir)
	case len(files) == 1:
		// if the target is not '.', use the single folder the clone generated
		subdir = path.Join(dir, files[0].Name())
	default:
		// if the target is not '.' but the clone generated many files, something is wrong
		return fmt.Errorf("unexpected directory contents: %v", files)
	}

	if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
	}
	if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
	}

	volume.SetVolumeOwnership(b, fsGroup)

	volumeutil.SetReady(b.getMetaDir())
	return nil
}
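
As a usage sketch, this is the kind of volume definition the mounter above consumes (values are illustrative; the same shape appears in the test scenarios later in this diff):

	vol := &v1.Volume{
		Name: "source",
		VolumeSource: v1.VolumeSource{
			GitRepo: &v1.GitRepoVolumeSource{
				Repository: "https://example.com/repo.git", // illustrative URL
				Revision:   "abc123",                       // optional; triggers checkout + reset --hard
				Directory:  "app",                          // optional clone target directory
			},
		},
	}
	spec := volume.NewSpecFromVolume(vol)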

func (b *gitRepoVolumeMounter) getMetaDir() string {
	return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName)
}

func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) {
	cmd := b.exec.Command(command, args...)
	cmd.SetDir(dir)
	return cmd.CombinedOutput()
}

// gitRepoVolumeUnmounter cleans git repo volumes.
type gitRepoVolumeUnmounter struct {
	*gitRepoVolume
}

var _ volume.Unmounter = &gitRepoVolumeUnmounter{}

// TearDown simply deletes everything in the directory.
func (c *gitRepoVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

// TearDownAt simply deletes everything in the directory.
func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
	// Wrap EmptyDir, let it do the teardown.
	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
	if err != nil {
		return err
	}
	return wrapped.TearDownAt(dir)
}

func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.GitRepoVolumeSource

	if spec.Volume != nil && spec.Volume.GitRepo != nil {
		volumeSource = spec.Volume.GitRepo
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
382
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo_test.go
generated
vendored
Normal file
@@ -0,0 +1,382 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package git_repo

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"reflect"
	"strings"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/empty_dir"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func newTestHost(t *testing.T) (string, volume.VolumeHost) {
	tempDir, err := ioutil.TempDir("/tmp", "git_repo_test.")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	return tempDir, volumetest.NewFakeVolumeHost(tempDir, nil, empty_dir.ProbeVolumePlugins())
}

func TestCanSupport(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	tempDir, host := newTestHost(t)
	defer os.RemoveAll(tempDir)
	plugMgr.InitPlugins(ProbeVolumePlugins(), host)

	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/git-repo" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GitRepo: &v1.GitRepoVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
}

// Expected command
type expectedCommand struct {
	// The git command
	cmd []string
	// The dir in which the git command is executed
	dir string
}

func TestPlugin(t *testing.T) {
	gitUrl := "https://github.com/kubernetes/kubernetes.git"
	revision := "2a30ce65c5ab586b98916d83385c5983edd353a1"

	scenarios := []struct {
		name              string
		vol               *v1.Volume
		expecteds         []expectedCommand
		isExpectedFailure bool
	}{
		{
			name: "target-dir",
			vol: &v1.Volume{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GitRepo: &v1.GitRepoVolumeSource{
						Repository: gitUrl,
						Revision:   revision,
						Directory:  "target_dir",
					},
				},
			},
			expecteds: []expectedCommand{
				{
					cmd: []string{"git", "clone", gitUrl, "target_dir"},
					dir: "",
				},
				{
					cmd: []string{"git", "checkout", revision},
					dir: "/target_dir",
				},
				{
					cmd: []string{"git", "reset", "--hard"},
					dir: "/target_dir",
				},
			},
			isExpectedFailure: false,
		},
		{
			name: "target-dir-no-revision",
			vol: &v1.Volume{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GitRepo: &v1.GitRepoVolumeSource{
						Repository: gitUrl,
						Directory:  "target_dir",
					},
				},
			},
			expecteds: []expectedCommand{
				{
					cmd: []string{"git", "clone", gitUrl, "target_dir"},
					dir: "",
				},
			},
			isExpectedFailure: false,
		},
		{
			name: "only-git-clone",
			vol: &v1.Volume{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GitRepo: &v1.GitRepoVolumeSource{
						Repository: gitUrl,
					},
				},
			},
			expecteds: []expectedCommand{
				{
					cmd: []string{"git", "clone", gitUrl},
					dir: "",
				},
			},
			isExpectedFailure: false,
		},
		{
			name: "no-target-dir",
			vol: &v1.Volume{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GitRepo: &v1.GitRepoVolumeSource{
						Repository: gitUrl,
						Revision:   revision,
						Directory:  "",
					},
				},
			},
			expecteds: []expectedCommand{
				{
					cmd: []string{"git", "clone", gitUrl},
					dir: "",
				},
				{
					cmd: []string{"git", "checkout", revision},
					dir: "/kubernetes",
				},
				{
					cmd: []string{"git", "reset", "--hard"},
					dir: "/kubernetes",
				},
			},
			isExpectedFailure: false,
		},
		{
			name: "current-dir",
			vol: &v1.Volume{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GitRepo: &v1.GitRepoVolumeSource{
						Repository: gitUrl,
						Revision:   revision,
						Directory:  ".",
					},
				},
			},
			expecteds: []expectedCommand{
				{
					cmd: []string{"git", "clone", gitUrl, "."},
					dir: "",
				},
				{
					cmd: []string{"git", "checkout", revision},
					dir: "",
				},
				{
					cmd: []string{"git", "reset", "--hard"},
					dir: "",
				},
			},
			isExpectedFailure: false,
		},
	}

	for _, scenario := range scenarios {
		allErrs := doTestPlugin(scenario, t)
		if len(allErrs) == 0 && scenario.isExpectedFailure {
			t.Errorf("Unexpected success for scenario: %s", scenario.name)
		}
		if len(allErrs) > 0 && !scenario.isExpectedFailure {
			t.Errorf("Unexpected failure for scenario: %s - %+v", scenario.name, allErrs)
		}
	}
}

func doTestPlugin(scenario struct {
	name              string
	vol               *v1.Volume
	expecteds         []expectedCommand
	isExpectedFailure bool
}, t *testing.T) []error {
	allErrs := []error{}

	plugMgr := volume.VolumePluginMgr{}
	rootDir, host := newTestHost(t)
	defer os.RemoveAll(rootDir)
	plugMgr.InitPlugins(ProbeVolumePlugins(), host)

	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		allErrs = append(allErrs,
			fmt.Errorf("Can't find the plugin by name"))
		return allErrs
	}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{})

	if err != nil {
		allErrs = append(allErrs,
			fmt.Errorf("Failed to make a new Mounter: %v", err))
		return allErrs
	}
	if mounter == nil {
		allErrs = append(allErrs,
			fmt.Errorf("Got a nil Mounter"))
		return allErrs
	}

	path := mounter.GetPath()
	suffix := fmt.Sprintf("pods/poduid/volumes/kubernetes.io~git-repo/%v", scenario.vol.Name)
	if !strings.HasSuffix(path, suffix) {
		allErrs = append(allErrs,
			fmt.Errorf("Got unexpected path: %s", path))
		return allErrs
	}

	// Test setUp()
	setUpErrs := doTestSetUp(scenario, mounter)
	allErrs = append(allErrs, setUpErrs...)

	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			allErrs = append(allErrs,
				fmt.Errorf("SetUp() failed, volume path not created: %s", path))
			return allErrs
		} else {
			allErrs = append(allErrs,
				fmt.Errorf("SetUp() failed: %v", err))
			return allErrs
		}
	}

	// gitRepo volume should create its own empty wrapper path
	podWrapperMetadataDir := fmt.Sprintf("%v/pods/poduid/plugins/kubernetes.io~empty-dir/wrapped_%v", rootDir, scenario.vol.Name)

	if _, err := os.Stat(podWrapperMetadataDir); err != nil {
		if os.IsNotExist(err) {
			allErrs = append(allErrs,
				fmt.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir))
		} else {
			allErrs = append(allErrs,
				fmt.Errorf("SetUp() failed: %v", err))
		}
	}

	unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
	if err != nil {
		allErrs = append(allErrs,
			fmt.Errorf("Failed to make a new Unmounter: %v", err))
		return allErrs
	}
	if unmounter == nil {
		allErrs = append(allErrs,
			fmt.Errorf("Got a nil Unmounter"))
		return allErrs
	}

	if err := unmounter.TearDown(); err != nil {
		allErrs = append(allErrs,
			fmt.Errorf("Expected success, got: %v", err))
		return allErrs
	}
	if _, err := os.Stat(path); err == nil {
		allErrs = append(allErrs,
			fmt.Errorf("TearDown() failed, volume path still exists: %s", path))
	} else if !os.IsNotExist(err) {
		allErrs = append(allErrs,
			fmt.Errorf("SetUp() failed: %v", err))
	}
	return allErrs
}

func doTestSetUp(scenario struct {
	name              string
	vol               *v1.Volume
	expecteds         []expectedCommand
	isExpectedFailure bool
}, mounter volume.Mounter) []error {
	expecteds := scenario.expecteds
	allErrs := []error{}

	// Construct combined outputs from expected commands
	var fakeOutputs []exec.FakeCombinedOutputAction
	var fcmd exec.FakeCmd
	for _, expected := range expecteds {
		if expected.cmd[1] == "clone" {
			fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
				// git clone, it creates new dir/files
				os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750)
				return []byte{}, nil
			})
		} else {
			// git checkout || git reset, they create nothing
			fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
				return []byte{}, nil
			})
		}
	}
	fcmd = exec.FakeCmd{
		CombinedOutputScript: fakeOutputs,
	}

	// Construct fake exec outputs from fcmd
	var fakeAction []exec.FakeCommandAction
	for i := 0; i < len(expecteds); i++ {
		fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd {
			return exec.InitFakeCmd(&fcmd, cmd, args...)
		})
	}
	fake := exec.FakeExec{
		CommandScript: fakeAction,
	}

	g := mounter.(*gitRepoVolumeMounter)
	g.exec = &fake

	g.SetUp(nil)

	if fake.CommandCalls != len(expecteds) {
		allErrs = append(allErrs,
			fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls))
	}
	var expectedCmds [][]string
	for _, expected := range expecteds {
		expectedCmds = append(expectedCmds, expected.cmd)
	}
	if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
		allErrs = append(allErrs,
			fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds))
	}

	var expectedPaths []string
	for _, expected := range expecteds {
		expectedPaths = append(expectedPaths, g.GetPath()+expected.dir)
	}
	if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) {
		allErrs = append(allErrs,
			fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths))
	}

	return allErrs
}
55
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD
generated
vendored
Normal file
@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "glusterfs.go",
        "glusterfs_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/errors:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/heketi/heketi/client/api/go-client",
        "//vendor:github.com/heketi/heketi/pkg/glusterfs/api",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["glusterfs_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/client/testing/core:go_default_library",
        "//pkg/runtime:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,2 @@
assignees:
- rootfs
19
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package glusterfs contains the internal representation of glusterfs
// volumes.
package glusterfs // import "k8s.io/kubernetes/pkg/volume/glusterfs"
715
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
generated
vendored
Normal file
File diff suppressed because it is too large
373
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go
generated
vendored
Normal file
@@ -0,0 +1,373 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package glusterfs

import (
	"fmt"
	"os"
	"reflect"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/glusterfs" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
		t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
	}
}

func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	ep := &v1.Endpoints{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
		Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
	var fcmd exec.FakeCmd
	fcmd = exec.FakeCmd{
		CombinedOutputScript: []exec.FakeCombinedOutputAction{
			// mount
			func() ([]byte, error) {
				return []byte{}, nil
			},
		},
	}
	fake := exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
		},
	}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
	volumePath := mounter.GetPath()
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Error("Got a nil Mounter")
	}
	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~glusterfs/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Error("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
}

func TestPluginVolume(t *testing.T) {
	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolume(t *testing.T) {
	vol := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
		},
	}

	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	ep := &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Namespace: "nsA",
			Name:      "ep",
		},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
		}},
	}

	client := fake.NewSimpleClientset(pv, claim, ep)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

func TestParseClassParameters(t *testing.T) {
	secret := v1.Secret{
		Type: "kubernetes.io/glusterfs",
		Data: map[string][]byte{
			"data": []byte("mypassword"),
		},
	}

	tests := []struct {
		name         string
		parameters   map[string]string
		secret       *v1.Secret
		expectError  bool
		expectConfig *provisioningConfig
	}{
		{
			"password",
			map[string]string{
				"resturl":     "https://localhost:8080",
				"restuser":    "admin",
				"restuserkey": "password",
			},
			nil,   // secret
			false, // expect error
			&provisioningConfig{
				url:         "https://localhost:8080",
				user:        "admin",
				userKey:     "password",
				secretValue: "password",
			},
		},
		{
			"secret",
			map[string]string{
				"resturl":         "https://localhost:8080",
				"restuser":        "admin",
				"secretname":      "mysecret",
				"secretnamespace": "default",
			},
			&secret,
			false, // expect error
			&provisioningConfig{
				url:             "https://localhost:8080",
				user:            "admin",
				secretName:      "mysecret",
				secretNamespace: "default",
				secretValue:     "mypassword",
			},
		},
		{
			"no authentication",
			map[string]string{
				"resturl":         "https://localhost:8080",
				"restauthenabled": "false",
			},
			&secret,
			false, // expect error
			&provisioningConfig{
				url: "https://localhost:8080",
			},
		},
		{
			"missing secret",
			map[string]string{
				"resturl":         "https://localhost:8080",
				"secretname":      "mysecret",
				"secretnamespace": "default",
			},
			nil,  // secret
			true, // expect error
			nil,
		},
		{
			"secret with no namespace",
			map[string]string{
				"resturl":    "https://localhost:8080",
				"secretname": "mysecret",
			},
			&secret,
			true, // expect error
			nil,
		},
		{
			"missing url",
			map[string]string{
				"restuser":    "admin",
				"restuserkey": "password",
			},
			nil,  // secret
			true, // expect error
			nil,
		},
		{
			"unknown parameter",
			map[string]string{
				"unknown":     "yes",
				"resturl":     "https://localhost:8080",
				"restuser":    "admin",
				"restuserkey": "password",
			},
			nil,  // secret
			true, // expect error
			nil,
		},
	}

	for _, test := range tests {
		client := &fake.Clientset{}
		client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			if test.secret != nil {
				return true, test.secret, nil
			}
			return true, nil, fmt.Errorf("Test %s did not set a secret", test.name)
		})

		cfg, err := parseClassParameters(test.parameters, client)

		if err != nil && !test.expectError {
			t.Errorf("Test %s got unexpected error %v", test.name, err)
		}
		if err == nil && test.expectError {
			t.Errorf("test %s expected error and got none", test.name)
		}
		if test.expectConfig != nil {
			if !reflect.DeepEqual(cfg, test.expectConfig) {
				t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg)
			}
		}
	}
}
71
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package glusterfs

import (
	"bufio"
	"fmt"
	"os"

	"github.com/golang/glog"
)

// readGlusterLog reads the last two lines of the given log file after a
// failed gluster SetUp and returns them wrapped in an error, so kubelet can
// properly expose them to the user; it returns nil if the log is empty.
func readGlusterLog(path string, podName string) error {
	var line1 string
	var line2 string
	linecount := 0

	glog.Infof("glusterfs: failure, now attempting to read the gluster log for pod %s", podName)

	// Check and make sure path exists
	if len(path) == 0 {
		return fmt.Errorf("glusterfs: log file does not exist for pod: %s", podName)
	}

	// open the log file
	file, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("glusterfs: could not open log file for pod: %s", podName)
	}
	defer file.Close()

	// read in and scan the file using scanner
	// from stdlib
	fscan := bufio.NewScanner(file)

	// rather than guessing on bytes or using Seek
	// going to scan entire file and take the last two lines
	// generally the file should be small since it is pod specific
	for fscan.Scan() {
		if linecount > 0 {
			line1 = line2
		}
		line2 = "\n" + fscan.Text()

		linecount++
	}

	if linecount > 0 {
		return fmt.Errorf("%v", line1+line2+"\n")
	}
	return nil
}
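
A sketch of the intended call site (the real one is in glusterfs.go, whose diff is suppressed above; the log path, podName, and mountErr below are illustrative):

	if mountErr != nil { // a failed gluster mount, hypothetical
		logPath := "/var/lib/kubelet/plugins/kubernetes.io/glusterfs/vol1/glusterfs.log" // illustrative
		if logErr := readGlusterLog(logPath, podName); logErr != nil {
			// surface the last two log lines alongside the mount error
			return fmt.Errorf("glusterfs: mount failed: %v, gluster log: %v", mountErr, logErr)
		}
		return mountErr
	}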
44
vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD
generated
vendored
Normal file
@@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "host_path.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/conversion:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/uuid:go_default_library",
        "//pkg/volume:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["host_path_test.go"],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util:go_default_library",
        "//pkg/util/uuid:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
3
vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
maintainers:
- saad-ali
- thockin
19
vendor/k8s.io/kubernetes/pkg/volume/host_path/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package host_path contains the internal representation of hostPath
// volumes.
package host_path // import "k8s.io/kubernetes/pkg/volume/host_path"
338
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go
generated
vendored
Normal file
@@ -0,0 +1,338 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package host_path

import (
	"fmt"
	"os"
	"regexp"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/volume"
)

// This is the primary entrypoint for volume plugins.
// The volumeConfig arg provides the ability to configure volume behavior. It is implemented as a pointer to allow nils.
// The hostPathPlugin is used to store the volumeConfig and give it, when needed, to the func that creates HostPath Recyclers.
// Tests that exercise recycling should not use this func but instead use ProbeRecyclablePlugins() to override default behavior.
func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
	return []volume.VolumePlugin{
		&hostPathPlugin{
			host:   nil,
			config: volumeConfig,
		},
	}
}

type hostPathPlugin struct {
	host   volume.VolumeHost
	config volume.VolumeConfig
}

var _ volume.VolumePlugin = &hostPathPlugin{}
var _ volume.PersistentVolumePlugin = &hostPathPlugin{}
var _ volume.RecyclableVolumePlugin = &hostPathPlugin{}
var _ volume.DeletableVolumePlugin = &hostPathPlugin{}
var _ volume.ProvisionableVolumePlugin = &hostPathPlugin{}

const (
	hostPathPluginName = "kubernetes.io/host-path"
)

func (plugin *hostPathPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *hostPathPlugin) GetPluginName() string {
	return hostPathPluginName
}

func (plugin *hostPathPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.Path, nil
}

func (plugin *hostPathPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath != nil) ||
		(spec.Volume != nil && spec.Volume.HostPath != nil)
}

func (plugin *hostPathPlugin) RequiresRemount() bool {
	return false
}

func (plugin *hostPathPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	hostPathVolumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	return &hostPathMounter{
		hostPath: &hostPath{path: hostPathVolumeSource.Path},
		readOnly: readOnly,
	}, nil
}

func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &hostPathUnmounter{&hostPath{
		path: "",
	}}, nil
}

func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
	return newRecycler(pvName, spec, eventRecorder, plugin.host, plugin.config)
}

func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return newDeleter(spec, plugin.host)
}

func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	if !plugin.config.ProvisioningEnabled {
		return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName())
	}
	return newProvisioner(options, plugin.host, plugin)
}

func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	hostPathVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{
				Path: volumeName,
			},
		},
	}
	return volume.NewSpecFromVolume(hostPathVolume), nil
}

func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	path := spec.PersistentVolume.Spec.HostPath.Path
	return &hostPathRecycler{
		name:          spec.Name(),
		path:          path,
		host:          host,
		config:        config,
		timeout:       volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
		pvName:        pvName,
		eventRecorder: eventRecorder,
	}, nil
}

func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
	// Note: the guard must also reject a nil PersistentVolume, since the line
	// below dereferences it (the original condition only checked for a nil
	// HostPath on a non-nil PersistentVolume).
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	path := spec.PersistentVolume.Spec.HostPath.Path
	return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
}

func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) {
	return &hostPathProvisioner{options: options, host: host, plugin: plugin}, nil
}

// HostPath volumes represent a bare host file or directory mount.
// The file or directory at the specified path will be directly exposed to the container.
type hostPath struct {
	path string
	volume.MetricsNil
}

func (hp *hostPath) GetPath() string {
	return hp.path
}

type hostPathMounter struct {
	*hostPath
	readOnly bool
}

var _ volume.Mounter = &hostPathMounter{}

func (b *hostPathMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         false,
		SupportsSELinux: false,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) needed to mount the volume are available on the underlying
// node. If they are not, it returns an error.
func (b *hostPathMounter) CanMount() error {
	return nil
}

// SetUp does nothing.
func (b *hostPathMounter) SetUp(fsGroup *int64) error {
	return nil
}

// SetUpAt does not make sense for host paths - probably programmer error.
func (b *hostPathMounter) SetUpAt(dir string, fsGroup *int64) error {
	return fmt.Errorf("SetUpAt() does not make sense for host paths")
}

func (b *hostPathMounter) GetPath() string {
	return b.path
}

type hostPathUnmounter struct {
	*hostPath
}

var _ volume.Unmounter = &hostPathUnmounter{}

// TearDown does nothing.
func (c *hostPathUnmounter) TearDown() error {
	return nil
}

// TearDownAt does not make sense for host paths - probably programmer error.
func (c *hostPathUnmounter) TearDownAt(dir string) error {
	return fmt.Errorf("TearDownAt() does not make sense for host paths")
}

// hostPathRecycler implements a Recycler for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster
type hostPathRecycler struct {
	name    string
	path    string
	host    volume.VolumeHost
	config  volume.VolumeConfig
	timeout int64
	volume.MetricsNil
	pvName        string
	eventRecorder volume.RecycleEventRecorder
}

func (r *hostPathRecycler) GetPath() string {
	return r.path
}

// Recycle recycles/scrubs clean a HostPath volume.
// Recycle blocks until the pod has completed or any error occurs.
// HostPath recycling only works in single node clusters and is meant for testing purposes only.
func (r *hostPathRecycler) Recycle() error {
	templateClone, err := conversion.NewCloner().DeepCopy(r.config.RecyclerPodTemplate)
	if err != nil {
		return err
	}
	pod := templateClone.(*v1.Pod)
	// overrides
	pod.Spec.ActiveDeadlineSeconds = &r.timeout
	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
		HostPath: &v1.HostPathVolumeSource{
			Path: r.path,
		},
	}
	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}

// hostPathProvisioner implements a Provisioner for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster.
type hostPathProvisioner struct {
	host    volume.VolumeHost
	options volume.VolumeOptions
	plugin  *hostPathPlugin
}

// Provision for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): capacity,
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}
	if len(r.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
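
For orientation, an illustrative result of the provisioner above: given an options.PVName of "pv-demo" (hypothetical) and a PVC storage request, the returned object is a PersistentVolume backed by a freshly created /tmp/hostpath_pv/<uuid> directory:

	pv, err := provisioner.Provision() // provisioner is a *hostPathProvisioner
	// pv.Spec.HostPath.Path == "/tmp/hostpath_pv/<some-uuid>"
	// pv.Spec.Capacity[v1.ResourceStorage] == the PVC's requested storage
	_ = err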

// hostPathDeleter deletes a hostPath PV from the cluster.
// This deleter only works on a single host cluster and is for testing purposes only.
type hostPathDeleter struct {
	name string
	path string
	host volume.VolumeHost
	volume.MetricsNil
}

func (r *hostPathDeleter) GetPath() string {
	return r.path
}

// Delete for hostPath removes the local directory so long as it is beneath /tmp/*.
// THIS IS FOR TESTING AND LOCAL DEVELOPMENT ONLY! This message should scare you away from using
// this deleter for anything other than development and testing.
func (r *hostPathDeleter) Delete() error {
	re := regexp.MustCompile("/tmp/.+")
	if !re.MatchString(r.GetPath()) {
		return fmt.Errorf("host_path deleter only supports /tmp/.+ but received %s", r.GetPath())
	}
	return os.RemoveAll(r.GetPath())
}
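
The /tmp regex above is the deleter's only safety check. Extracted as a standalone helper (hypothetical name, for illustration only):

// isDeletablePath mirrors the guard in Delete(): only paths containing
// "/tmp/" plus at least one character qualify. Note the pattern is
// unanchored, so e.g. "/var/tmp/x" also matches.
// Hypothetical helper, not part of the plugin.
func isDeletablePath(path string) bool {
	return regexp.MustCompile("/tmp/.+").MatchString(path)
}
// isDeletablePath("/tmp")         == false
// isDeletablePath("/tmp/scratch") == true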

func getVolumeSource(
	spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.HostPath != nil {
		return spec.Volume.HostPath, spec.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.HostPath != nil {
		return spec.PersistentVolume.Spec.HostPath, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("spec does not reference a HostPath volume type")
}
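
getVolumeSource accepts either a direct volume or a PersistentVolume wrapper; both shapes appear in the tests below. A sketch of constructing each, using the constructors the tests themselves call (paths are illustrative):

// Sketch: the two spec shapes getVolumeSource handles.
direct := volume.NewSpecFromVolume(&v1.Volume{
	Name:         "vol1",
	VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/vol1"}},
})
persistent := volume.NewSpecFromPersistentVolume(&v1.PersistentVolume{
	Spec: v1.PersistentVolumeSpec{
		PersistentVolumeSource: v1.PersistentVolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: "/vol1"},
		},
	},
}, false /* readOnly */)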
276
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package host_path

import (
	"fmt"
	"os"
	"testing"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/host-path" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

func TestGetAccessModes(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != v1.ReadWriteOnce {
		t.Errorf("Expected %s PersistentVolumeAccessMode", v1.ReadWriteOnce)
	}
}

func TestRecycler(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	pluginHost := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
	plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, pluginHost)

	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
	plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	recycler, err := plug.NewRecycler("pv-name", spec, nil)
	if err != nil {
		t.Errorf("Failed to make a new Recycler: %v", err)
	}
	if recycler.GetPath() != spec.PersistentVolume.Spec.HostPath.Path {
		t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.HostPath.Path, recycler.GetPath())
	}
}

func TestDeleter(t *testing.T) {
	// Deleter has a hard-coded regex for "/tmp".
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", uuid.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))

	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Deleter expected to return nil but got %s", err)
	}
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}

func TestDeleterTempDir(t *testing.T) {
	tests := map[string]struct {
		expectedFailure bool
		path            string
	}{
		"just-tmp": {true, "/tmp"},
		"not-tmp":  {true, "/nottmp"},
		"good-tmp": {false, "/tmp/scratch"},
	}

	for name, test := range tests {
		plugMgr := volume.VolumePluginMgr{}
		plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
		spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
		plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
		deleter, _ := plug.NewDeleter(spec)
		err := deleter.Delete()
		if err == nil && test.expectedFailure {
			t.Errorf("Expected failure for test '%s' but got nil err", name)
		}
		if err != nil && !test.expectedFailure {
			t.Errorf("Unexpected failure for test '%s': %v", name, err)
		}
	}
}

func TestProvisioner(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", uuid.NewUUID())
	defer os.RemoveAll(tempPath)
	if err := os.MkdirAll(tempPath, 0750); err != nil {
		t.Fatalf("Failed to create tmp directory for provisioner: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
		volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindCreatablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	options := volume.VolumeOptions{
		PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}
	creator, err := plug.NewProvisioner(options)
	if err != nil {
		t.Errorf("Failed to make a new Provisioner: %v", err)
	}
	pv, err := creator.Provision()
	if err != nil {
		t.Errorf("Unexpected error creating volume: %v", err)
	}
	if pv.Spec.HostPath.Path == "" {
		t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv)
	}
	expectedCapacity := resource.NewQuantity(1*1024*1024*1024, resource.BinarySI)
	actualCapacity := pv.Spec.Capacity[v1.ResourceStorage]
	expectedAmt := expectedCapacity.Value()
	actualAmt := actualCapacity.Value()
	if expectedAmt != actualAmt {
		t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt)
	}

	if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
		t.Errorf("Expected reclaim policy %+v but got %+v", v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
	}

	os.RemoveAll(pv.Spec.HostPath.Path)
}

func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/vol1"}},
	}
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	path := mounter.GetPath()
	if path != "/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "foo"},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.GetAttributes().ReadOnly")
	}
}
51
vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD
generated
vendored
Normal file
@@ -0,0 +1,51 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "disk_manager.go",
        "doc.go",
        "iscsi.go",
        "iscsi_util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "iscsi_test.go",
        "iscsi_util_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff.