Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

56
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD generated vendored Normal file

@@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"aws_ebs.go",
"aws_util.go",
"doc.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"aws_ebs_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor:github.com/golang/glog",
],
)

3
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS generated vendored Normal file

@@ -0,0 +1,3 @@
maintainers:
- jsafrane
- justinsb

250
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go generated vendored Normal file

@@ -0,0 +1,250 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type awsElasticBlockStoreAttacher struct {
host volume.VolumeHost
awsVolumes aws.Volumes
}
var _ volume.Attacher = &awsElasticBlockStoreAttacher{}
var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &awsElasticBlockStoreAttacher{
host: plugin.host,
awsVolumes: awsCloud,
}, nil
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
return mount.GetMountRefs(mounter, deviceMountPath)
}
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID)
// awsCloud.AttachDisk checks if disk is already attached to node and
// succeeds in that case, so no need to do that separately.
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
if err != nil {
glog.Errorf("Error attaching volume %q: %+v", volumeID, err)
return "", err
}
return devicePath, nil
}
func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec)
volumeIDList := []aws.KubernetesVolumeID{}
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
name := aws.KubernetesVolumeID(volumeSource.VolumeID)
volumeIDList = append(volumeIDList, name)
volumesAttachedCheck[spec] = true
volumeSpecMap[name] = spec
}
attachedResult, err := attacher.awsVolumes.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if volumes (%v) is already attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
for volumeID, attached := range attachedResult {
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
partition := ""
if volumeSource.Partition != 0 {
partition = strconv.Itoa(int(volumeSource.Partition))
}
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID)
}
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
if devicePath != "" {
devicePaths := getDiskByIdPaths(partition, devicePath)
path, err := verifyDevicePath(devicePaths)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
} else if path != "" {
// A device path has successfully been created for the PD
glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
return path, nil
}
} else {
glog.V(5).Infof("AWS Volume (%q) is not attached yet", volumeID)
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
}
}
}
func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil
}
// FIXME: this method can be further pruned.
func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type awsElasticBlockStoreDetacher struct {
mounter mount.Interface
awsVolumes aws.Volumes
}
var _ volume.Detacher = &awsElasticBlockStoreDetacher{}
func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &awsElasticBlockStoreDetacher{
mounter: plugin.host.GetMounter(),
awsVolumes: awsCloud,
}, nil
}
func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
volumeID := aws.KubernetesVolumeID(path.Base(deviceMountPath))
attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, nodeName)
if err != nil {
// Log error and continue with detach
glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil
}
if _, err = detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
return err
}
return nil
}
func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
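
The Attach/WaitForAttach/GetDeviceMountPath/MountDevice methods above are driven in a fixed order by the attach/detach controller and the kubelet. A minimal sketch of that sequence for one EBS volume, assuming the plugin, spec, node name, and the 10-minute timeout are example inputs supplied by the caller (illustration only, not part of the vendored file):

package ebsexample // hypothetical package for this sketch

import (
	"time"

	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
)

// attachAndMountOnce walks a single volume through attach -> wait -> mount.
func attachAndMountOnce(plugin volume.AttachableVolumePlugin, spec *volume.Spec, nodeName types.NodeName) error {
	attacher, err := plugin.NewAttacher()
	if err != nil {
		return err
	}
	// Attach is effectively idempotent: AttachDisk succeeds if the disk is
	// already attached to the node, and returns the AWS device path.
	devicePath, err := attacher.Attach(spec, nodeName)
	if err != nil {
		return err
	}
	// Poll until the device node actually appears on the host, or time out.
	devicePath, err = attacher.WaitForAttach(spec, devicePath, 10*time.Minute)
	if err != nil {
		return err
	}
	// Shared "global" mount point under the plugin directory.
	deviceMountPath, err := attacher.GetDeviceMountPath(spec)
	if err != nil {
		return err
	}
	// Format the device if needed and mount it at the global path; per-pod
	// bind mounts are created later by the mounter in aws_ebs.go.
	return attacher.MountDevice(spec, devicePath, deviceMountPath)
}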

342
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go generated vendored Normal file

@@ -0,0 +1,342 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
)
func TestGetVolumeName_Volume(t *testing.T) {
plugin := newPlugin()
name := aws.KubernetesVolumeID("my-aws-volume")
spec := createVolSpec(name, false)
volumeName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetVolumeName error: %v", err)
}
if volumeName != string(name) {
t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
}
}
func TestGetVolumeName_PersistentVolume(t *testing.T) {
plugin := newPlugin()
name := aws.KubernetesVolumeID("my-aws-pv")
spec := createPVSpec(name, true)
volumeName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetVolumeName error: %v", err)
}
if volumeName != string(name) {
t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name aws.KubernetesVolumeID
// For fake AWS:
attach attachCall
detach detachCall
diskIsAttached diskIsAttachedCall
t *testing.T
// Actual test to run
test func(test *testcase) (string, error)
// Expected return of the test
expectedDevice string
expectedError error
}
func TestAttachDetach(t *testing.T) {
diskName := aws.KubernetesVolumeID("disk")
nodeName := types.NodeName("instance")
readOnly := false
spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
diskCheckError := errors.New("Fake DiskIsAttached error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedDevice: "/dev/sda",
},
// Attach call fails
{
name: "Attach_Negative",
attach: attachCall{diskName, nodeName, readOnly, "", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: attachError,
},
// Detach succeeds
{
name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
},
// Disk is already detached
{
name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
},
// Detach succeeds when DiskIsAttached fails
{
name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, "", detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
expectedError: detachError,
},
}
for _, testcase := range tests {
testcase.t = t
device, err := testcase.test(&testcase)
if err != testcase.expectedError {
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError.Error(), err.Error())
}
if device != testcase.expectedDevice {
t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
}
t.Logf("Test %q succeeded", testcase.name)
}
}
// newPlugin creates a new awsElasticBlockStorePlugin with a fake cloud;
// NewAttacher and NewDetacher won't work.
func newPlugin() *awsElasticBlockStorePlugin {
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*awsElasticBlockStorePlugin)
}
func newAttacher(testcase *testcase) *awsElasticBlockStoreAttacher {
return &awsElasticBlockStoreAttacher{
host: nil,
awsVolumes: testcase,
}
}
func newDetacher(testcase *testcase) *awsElasticBlockStoreDetacher {
return &awsElasticBlockStoreDetacher{
awsVolumes: testcase,
}
}
func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},
},
},
}
}
func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},
},
},
},
}
}
// Fake AWS implementation
type attachCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
readOnly bool
retDeviceName string
ret error
}
type detachCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
retDeviceName string
ret error
}
type diskIsAttachedCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
isAttached bool
ret error
}
func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
if expected.readOnly != readOnly {
testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
}
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
expected := &testcase.detach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return "", errors.New("Unexpected DetachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected DetachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
func (testcase *testcase) DisksAreAttached(diskNames []aws.KubernetesVolumeID, nodeName types.NodeName) (map[aws.KubernetesVolumeID]bool, error) {
return nil, errors.New("Not implemented")
}
func (testcase *testcase) CreateDisk(volumeOptions *aws.VolumeOptions) (volumeName aws.KubernetesVolumeID, err error) {
return "", errors.New("Not implemented")
}
func (testcase *testcase) DeleteDisk(volumeName aws.KubernetesVolumeID) (bool, error) {
return false, errors.New("Not implemented")
}
func (testcase *testcase) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (map[string]string, error) {
return map[string]string{}, errors.New("Not implemented")
}
func (testcase *testcase) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
return "", errors.New("Not implemented")
}

493
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go generated vendored Normal file

@@ -0,0 +1,493 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}
type awsElasticBlockStorePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}
const (
awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
awsURLNamePrefix = "aws://"
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}
func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
return awsElasticBlockStorePluginName
}
func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumeID, nil
}
func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil) ||
(spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
}
func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
return false
}
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
ebs, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
fsType := ebs.FSType
partition := ""
if ebs.Partition != 0 {
partition = strconv.Itoa(int(ebs.Partition))
}
return &awsElasticBlockStoreMounter{
awsElasticBlockStore: &awsElasticBlockStore{
podUID: podUID,
volName: spec.Name(),
volumeID: volumeID,
partition: partition,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
}
return &awsElasticBlockStoreDeleter{
awsElasticBlockStore: &awsElasticBlockStore{
volName: spec.Name(),
volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
manager: manager,
plugin: plugin,
}}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}
func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
return &awsElasticBlockStoreProvisioner{
awsElasticBlockStore: &awsElasticBlockStore{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func getVolumeSource(
spec *volume.Spec) (*v1.AWSElasticBlockStoreVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
}
func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
// This is a workaround to fix the issue in converting aws volume id from globalPDPath
// There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are:
// aws:///vol-1234 (aws/vol-1234)
// aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
// vol-1234 (vol-1234)
// This code is for converting volume id to aws style volume id for the first two cases.
sourceName := volumeID
if strings.HasPrefix(volumeID, "aws/") {
names := strings.Split(volumeID, "/")
length := len(names)
if length < 2 || length > 3 {
return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
}
volName := names[length-1]
if !strings.HasPrefix(volName, "vol-") {
return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
}
if length == 2 {
sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
}
if length == 3 {
sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
}
glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
}
awsVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: sourceName,
},
},
}
return volume.NewSpecFromVolume(awsVolume), nil
}
// Abstract interface to PD operations.
type ebsManager interface {
CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error)
// Deletes a volume
DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}
// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
type awsElasticBlockStore struct {
volName string
podUID types.UID
// Unique id of the PD, used to find the disk resource in the provider.
volumeID aws.KubernetesVolumeID
// Specifies the partition to mount
partition string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager ebsManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
plugin *awsElasticBlockStorePlugin
volume.MetricsProvider
}
type awsElasticBlockStoreMounter struct {
*awsElasticBlockStore
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &awsElasticBlockStoreMounter{}
func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *awsElasticBlockStoreMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
return nil
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("failed to unmount %s: %v", dir, mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
glog.Errorf("Mount of disk %s failed: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(4).Infof("Successfully mounted %s", dir)
return nil
}
func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
// Clean up the URI to be more fs-friendly
name := string(volumeID)
name = strings.Replace(name, "://", "/", -1)
return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}
// Reverses the mapping done in makeGlobalPDPath
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
rel, err := filepath.Rel(basePath, globalPath)
if err != nil {
glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
return "", err
}
if strings.Contains(rel, "../") {
glog.Errorf("Unexpected mount path: %s", globalPath)
return "", fmt.Errorf("unexpected mount path: " + globalPath)
}
// Reverse the :// replacement done in makeGlobalPDPath
volumeID := rel
if strings.HasPrefix(volumeID, "aws/") {
volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
}
glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
return volumeID, nil
}
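
// Illustrative worked example (not part of the upstream source), assuming the
// kubelet plugin dir /var/lib/kubelet/plugins/kubernetes.io/aws-ebs and
// mount.MountsInGlobalPDPath == "mounts":
//
//   makeGlobalPDPath(host, "aws://us-east-1a/vol-0abc")
//     -> /var/lib/kubelet/plugins/kubernetes.io/aws-ebs/mounts/aws/us-east-1a/vol-0abc
//   getVolumeIDFromGlobalMount(host, <that path>)
//     -> "aws://us-east-1a/vol-0abc"   (the "aws/" prefix is restored to "aws://")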
func (ebs *awsElasticBlockStore) GetPath() string {
return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}
type awsElasticBlockStoreUnmounter struct {
*awsElasticBlockStore
}
var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
return err
}
if notMnt {
glog.V(2).Info("Not mountpoint, deleting")
return os.Remove(dir)
}
// Unmount the bind-mount inside this pod
if err := c.mounter.Unmount(dir); err != nil {
glog.V(2).Info("Error unmounting dir ", dir, ": ", err)
return err
}
notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if notMnt {
if err := os.Remove(dir); err != nil {
glog.V(2).Info("Error removing mountpoint ", dir, ": ", err)
return err
}
}
return nil
}
type awsElasticBlockStoreDeleter struct {
*awsElasticBlockStore
}
var _ volume.Deleter = &awsElasticBlockStoreDeleter{}
func (d *awsElasticBlockStoreDeleter) GetPath() string {
return getPath(d.podUID, d.volName, d.plugin.host)
}
func (d *awsElasticBlockStoreDeleter) Delete() error {
return d.manager.DeleteVolume(d)
}
type awsElasticBlockStoreProvisioner struct {
*awsElasticBlockStore
options volume.VolumeOptions
namespace string
}
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
glog.Errorf("Provision failed: %v", err)
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(volumeID),
FSType: "ext4",
Partition: 0,
ReadOnly: false,
},
},
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
}
}
return pv, nil
}
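
SetUpAt above bind-mounts the shared global mount (created by MountDevice in attacher.go) into the pod's own volume directory, whose location is determined by getPath and EscapeQualifiedNameForDisk. A small sketch of that path construction, assuming the default kubelet root; the pod UID and volume name are example values matching the layout the tests below assert with a fake volume host:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	kubeletRoot := "/var/lib/kubelet" // assumed default root; the tests use a temp dir instead
	podUID := "poduid"                // example UID, as in the tests
	volName := "vol1"

	// EscapeQualifiedNameForDisk turns "kubernetes.io/aws-ebs" into
	// "kubernetes.io~aws-ebs" so the plugin name is a single path element.
	escapedPlugin := strings.Replace("kubernetes.io/aws-ebs", "/", "~", -1)

	// getPath: <root>/pods/<uid>/volumes/kubernetes.io~aws-ebs/<volName>
	perPodDir := path.Join(kubeletRoot, "pods", podUID, "volumes", escapedPlugin, volName)
	fmt.Println(perPodDir)
	// /var/lib/kubelet/pods/poduid/volumes/kubernetes.io~aws-ebs/vol1
}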

298
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go generated vendored Normal file

@@ -0,0 +1,298 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
}
if contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
}
}
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
type fakePDManager struct {
}
// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
return "test-aws-volume-name", 100, labels, nil
}
func (fake *fakePDManager) DeleteVolume(cd *awsElasticBlockStoreDeleter) error {
if cd.volumeID != "test-aws-volume-name" {
return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
}
return nil
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
fakeManager = &fakePDManager{}
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*awsElasticBlockStorePlugin).newDeleterInternal(volSpec, &fakePDManager{})
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
clientset := fake.NewSimpleClientset(pv, claim)
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if _, ok := mounter.(volume.Unmounter); ok {
t.Errorf("Volume Mounter can be type-assert to Unmounter")
}
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if _, ok := unmounter.(volume.Mounter); ok {
t.Errorf("Volume Unmounter can be type-assert to Mounter")
}
}

191
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go generated vendored Normal file

@@ -0,0 +1,191 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
diskPartitionSuffix = ""
diskXVDPath = "/dev/xvd"
diskXVDPattern = "/dev/xvd*"
maxChecks = 60
maxRetries = 10
checkSleepDuration = time.Second
errorSleepDuration = 5 * time.Second
)
type AWSDiskUtil struct{}
func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return err
}
deleted, err := cloud.DeleteDisk(d.volumeID)
if err != nil {
// AWS cloud provider returns volume.deletedVolumeInUseError when
// necessary, no handling needed here.
glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
return err
}
if deleted {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
} else {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
}
return nil
}
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, error) {
cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, err
}
// AWS volumes don't have Name field, store the name in Name tag
var tags map[string]string
if c.options.CloudTags == nil {
tags = make(map[string]string)
} else {
tags = *c.options.CloudTags
}
tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGB,
Tags: tags,
PVCName: c.options.PVC.Name,
}
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
volumeOptions.AvailabilityZone = v
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return "", 0, nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return "", 0, nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyId = v
default:
return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
}
name, err := cloud.CreateDisk(volumeOptions)
if err != nil {
glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
return "", 0, nil, err
}
glog.V(2).Infof("Successfully created EBS Disk volume %s", name)
labels, err := cloud.GetVolumeLabels(name)
if err != nil {
// We don't really want to leak the volume here...
glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
}
return name, int(requestGB), labels, nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string) (string, error) {
for _, path := range devicePaths {
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
return path, nil
}
}
return "", nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if exists, err := volumeutil.PathExists(path); err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
}
return allPathsRemoved, nil
}
// Returns list of all paths for given EBS mount
// This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
// Here it is mostly about applying the partition path
func getDiskByIdPaths(partition string, devicePath string) []string {
devicePaths := []string{}
if devicePath != "" {
devicePaths = append(devicePaths, devicePath)
}
if partition != "" {
for i, path := range devicePaths {
devicePaths[i] = path + diskPartitionSuffix + partition
}
}
return devicePaths
}
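
// Illustrative worked example (not part of the upstream source): with
// devicePath "/dev/xvdba" and partition "1", diskPartitionSuffix is empty on
// AWS, so the result above is ["/dev/xvdba1"]; with no partition it is just
// ["/dev/xvdba"]. WaitForAttach in attacher.go polls these candidates through
// verifyDevicePath until one of them exists on the node.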
// Return cloud provider
func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) {
awsCloudProvider, ok := cloudProvider.(*aws.Cloud)
if !ok || awsCloudProvider == nil {
return nil, fmt.Errorf("Failed to get AWS Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return awsCloudProvider, nil
}
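
CreateVolume above converts the PVC's byte request into whole GiB by rounding up before asking the cloud provider for a disk. A standalone sketch of that arithmetic, assuming plain ceiling division matches what volume.RoundUpSize does; the request sizes are example values:

package main

import "fmt"

// roundUpSize performs the ceiling division assumed to match volume.RoundUpSize.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const giB int64 = 1024 * 1024 * 1024
	requests := []int64{
		100 * 1024 * 1024, // a 100Mi PVC request
		giB,               // exactly 1Gi
		giB + 1,           // just over 1Gi
		20 * giB,          // 20Gi
	}
	for _, bytes := range requests {
		fmt.Printf("%12d bytes -> %2d GiB EBS volume\n", bytes, roundUpSize(bytes, giB))
	}
	// Prints 1, 1, 2 and 20 GiB respectively.
}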

19
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package aws_ebs contains the internal representation of AWS Elastic
// Block Store volumes.
package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs"
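
For orientation, a minimal sketch of how a workload references an EBS volume so that the plugin vendored above attaches and mounts it, built with the same v1 types the tests use; the volume ID, image, and mount path are example values:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "ebs-example", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "app",
				Image: "busybox", // example image
				VolumeMounts: []v1.VolumeMount{{
					Name:      "data",
					MountPath: "/data",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "data",
				VolumeSource: v1.VolumeSource{
					// Served by the kubernetes.io/aws-ebs plugin above.
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: "vol-0123456789abcdef0", // example volume ID
						FSType:   "ext4",
					},
				},
			}},
		},
	}
	fmt.Printf("pod %s mounts EBS volume %s at %s\n",
		pod.Name,
		pod.Spec.Volumes[0].AWSElasticBlockStore.VolumeID,
		pod.Spec.Containers[0].VolumeMounts[0].MountPath)
}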