forked from barak/tarpoon

Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD generated vendored Normal file

@@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"azure_dd.go",
"azure_provision.go",
"vhd_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"azure_dd_test.go",
"vhd_util_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
],
)

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go generated vendored Normal file

@@ -0,0 +1,282 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskAttacher struct {
host volume.VolumeHost
azureProvider azureCloudProvider
}
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
const (
checkSleepDuration = time.Second
)
// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
return &azureDiskAttacher{
host: plugin.host,
azureProvider: azure,
}, nil
}
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec")
return "", err
}
instanceid, err := attacher.azureProvider.InstanceID(nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
"Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
instanceid, err)
}
if err == nil {
// Volume is already attached to node.
glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
} else {
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
}
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
}
}
return strconv.Itoa(int(lun)), err
}
func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
volumeIDList = append(volumeIDList, volumeSource.DiskName)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.DiskName] = spec
}
attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
for volumeID, attached := range attachedResult {
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if len(lunStr) == 0 {
return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
}
lun, err := strconv.Atoi(lunStr)
if err != nil {
return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
}
scsiHostRescan(&osIOHandler{})
exe := exec.New()
devicePath := ""
err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
return true, nil
} else {
// Log error, if any, and continue checking periodically
glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
return false, nil
}
})
return devicePath, err
}
// GetDeviceMountPath finds the volume's mount path on the node
func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil
}
// MountDevice runs mount command on the node to mount the volume
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if spec.ReadOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, options)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type azureDiskDetacher struct {
mounter mount.Interface
azureProvider azureCloudProvider
}
var _ volume.Detacher = &azureDiskDetacher{}
// NewDetacher initializes a volume Detacher
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &azureDiskDetacher{
mounter: plugin.host.GetMounter(),
azureProvider: azure,
}, nil
}
// Detach detaches disk from Azure VM.
func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
if diskName == "" {
return fmt.Errorf("invalid disk to detach: %q", diskName)
}
instanceid, err := detacher.azureProvider.InstanceID(nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
}
return err
}
// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
volume := path.Base(deviceMountPath)
if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil {
glog.Errorf("Error unmounting %q: %v", volume, err)
return err
} else {
return nil
}
}
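
Both Attach and Detach trim the provider-returned instance ID to its final path segment before using it as a lock key and log tag. A minimal standalone sketch of that trimming, with a hypothetical resource ID (real values come from azureProvider.InstanceID):

package main

import (
	"fmt"
	"strings"
)

// trimInstanceID mirrors the trimming in Attach/Detach above: Azure instance
// IDs are resource paths, and only the last segment (the VM name) is kept.
func trimInstanceID(instanceid string) string {
	if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
		instanceid = instanceid[(ind + 1):]
	}
	return instanceid
}

func main() {
	// Hypothetical resource ID; real ones come from the cloud provider.
	id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/node-0"
	fmt.Println(trimInstanceID(id)) // node-0
}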

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go generated vendored Normal file

@@ -0,0 +1,381 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"path"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
type azureDataDiskPlugin struct {
host volume.VolumeHost
volumeLocks keymutex.KeyMutex
}
// Abstract interface to disk operations.
// The Azure cloud provider should implement it.
type azureCloudProvider interface {
// Attaches the disk to the host machine.
AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// InstanceID returns the cloud provider ID of the specified instance.
InstanceID(nodeName types.NodeName) (string, error)
// Create a VHD blob
CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(name, uri string) error
}
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
return nil
}
func (plugin *azureDataDiskPlugin) GetPluginName() string {
return azureDataDiskPluginName
}
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.DiskName, nil
}
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}
func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter())
}
func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
// Azure disks used directly in a pod have a ReadOnly flag set by the pod author.
// Azure disks used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
azure, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
fsType := "ext4"
if azure.FSType != nil {
fsType = *azure.FSType
}
cachingMode := v1.AzureDataDiskCachingNone
if azure.CachingMode != nil {
cachingMode = *azure.CachingMode
}
readOnly := false
if azure.ReadOnly != nil {
readOnly = *azure.ReadOnly
}
diskName := azure.DiskName
diskUri := azure.DataDiskURI
return &azureDiskMounter{
azureDisk: &azureDisk{
podUID: podUID,
volName: spec.Name(),
diskName: diskName,
diskUri: diskUri,
cachingMode: cachingMode,
mounter: mounter,
plugin: plugin,
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}
func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &azureDiskUnmounter{
&azureDisk{
podUID: podUID,
volName: volName,
mounter: mounter,
plugin: plugin,
},
}, nil
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
azVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: sourceName,
},
},
}
return volume.NewSpecFromVolume(azVolume), nil
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
return mount.GetMountRefs(mounter, deviceMountPath)
}
type azureDisk struct {
volName string
podUID types.UID
diskName string
diskUri string
cachingMode v1.AzureDataDiskCachingMode
mounter mount.Interface
plugin *azureDataDiskPlugin
volume.MetricsNil
}
type azureDiskMounter struct {
*azureDisk
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &azureDiskMounter{}
func (b *azureDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *azureDiskMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
b.plugin.volumeLocks.LockKey(b.diskName)
defer b.plugin.volumeLocks.UnlockKey(b.diskName)
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notMnt {
glog.V(4).Infof("%s is a mount point", dir)
return nil
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName)
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir)
return nil
}
func makeGlobalPDPath(host volume.VolumeHost, volume string) string {
return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume)
}
func (azure *azureDisk) GetPath() string {
name := azureDataDiskPluginName
return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName)
}
type azureDiskUnmounter struct {
*azureDisk
}
var _ volume.Unmounter = &azureDiskUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDownAt(dir string) error {
notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.Errorf("Error checking if mountpoint %s: %v", dir, err)
return err
}
if notMnt {
glog.V(2).Info("Not mountpoint, deleting")
return os.Remove(dir)
}
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.diskName)
defer c.plugin.volumeLocks.UnlockKey(c.diskName)
refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil {
glog.Errorf("Error getting mountrefs for %s: %v", dir, err)
return err
}
if len(refs) == 0 {
glog.Errorf("Did not find pod-mount for %s during tear down", dir)
return fmt.Errorf("%s is not mounted", dir)
}
c.diskName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir)
// Unmount the bind-mount inside this pod
if err := c.mounter.Unmount(dir); err != nil {
glog.Errorf("Error unmounting dir %s %v", dir, err)
return err
}
notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notMnt {
if err := os.Remove(dir); err != nil {
glog.Errorf("Error removing mountpoint %s %v", dir, err)
return err
}
}
return nil
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}
return nil, fmt.Errorf("Spec does not reference an Azure disk volume type")
}
// Return cloud provider
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
if !ok || azureCloudProvider == nil {
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return azureCloudProvider, nil
}
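
newMounterInternal fills defaults for the optional pointer fields on the AzureDisk source: ext4 when FSType is nil, AzureDataDiskCachingNone when CachingMode is nil, and false when ReadOnly is nil. A minimal sketch of that pointer-defaulting pattern (defaultString is a hypothetical helper, not part of the plugin):

package main

import "fmt"

// defaultString mirrors the defaulting in newMounterInternal above: optional
// pointer fields fall back to a plugin default when unset.
func defaultString(p *string, def string) string {
	if p != nil {
		return *p
	}
	return def
}

func main() {
	var fsType *string // unset in the pod spec
	fmt.Println(defaultString(fsType, "ext4")) // ext4
	xfs := "xfs"
	fmt.Println(defaultString(&xfs, "ext4")) // xfs
}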

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go generated vendored Normal file

@@ -0,0 +1,177 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"path"
"testing"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_dd")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != azureDataDiskPluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
const (
fakeDiskName = "foo"
fakeDiskUri = "https://azure/vhds/bar.vhd"
fakeLun = 2
)
type fakeAzureProvider struct {
}
func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error {
if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun {
return fmt.Errorf("wrong disk")
}
return nil
}
func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error {
if diskName != fakeDiskName || diskUri != fakeDiskUri {
return fmt.Errorf("wrong disk")
}
return nil
}
func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) {
return int32(fakeLun), nil
}
func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) {
return fakeLun, nil
}
func (fake *fakeAzureProvider) InstanceID(name string) (string, error) {
return "localhost", nil
}
func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
return "", "", 0, fmt.Errorf("not implemented")
}
func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error {
return fmt.Errorf("not implemented")
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_ddTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fs := "ext4"
ro := false
caching := v1.AzureDataDiskCachingNone
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: fakeDiskName,
DataDiskURI: fakeDiskUri,
FSType: &fs,
CachingMode: &caching,
ReadOnly: &ro,
},
},
}
mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s, should be %s", path, volPath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}
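
The expected volPath in TestPlugin uses kubernetes.io~azure-disk because GetPath escapes the plugin name with utilstrings.EscapeQualifiedNameForDisk before building the pod volume directory. A standalone sketch of that escaping (escapeQualifiedNameForDisk is a local stand-in assumed to replace '/' with '~'):

package main

import (
	"fmt"
	"path"
	"strings"
)

// escapeQualifiedNameForDisk stands in for utilstrings.EscapeQualifiedNameForDisk:
// '/' cannot appear in a directory name, so it is replaced with '~'.
func escapeQualifiedNameForDisk(in string) string {
	return strings.Replace(in, "/", "~", -1)
}

func main() {
	plugin := "kubernetes.io/azure-disk"
	dir := path.Join("pods/poduid/volumes", escapeQualifiedNameForDisk(plugin), "vol1")
	fmt.Println(dir) // pods/poduid/volumes/kubernetes.io~azure-disk/vol1
}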

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go generated vendored Normal file

@@ -0,0 +1,162 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
type azureDiskDeleter struct {
*azureDisk
azureProvider azureCloudProvider
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
return plugin.newDeleterInternal(spec, azure)
}
func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil {
return nil, fmt.Errorf("invalid PV spec")
}
diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName
diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI
return &azureDiskDeleter{
azureDisk: &azureDisk{
volName: spec.Name(),
diskName: diskName,
diskUri: diskUri,
plugin: plugin,
},
azureProvider: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return plugin.newProvisionerInternal(options, azure)
}
func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
return &azureDiskProvisioner{
azureDisk: &azureDisk{
plugin: plugin,
},
azureProvider: azure,
options: options,
}, nil
}
var _ volume.Deleter = &azureDiskDeleter{}
func (d *azureDiskDeleter) GetPath() string {
name := azureDataDiskPluginName
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName)
}
func (d *azureDiskDeleter) Delete() error {
glog.V(4).Infof("deleting volume %s", d.diskUri)
return d.azureProvider.DeleteVolume(d.diskName, d.diskUri)
}
type azureDiskProvisioner struct {
*azureDisk
azureProvider azureCloudProvider
options volume.VolumeOptions
}
var _ volume.Provisioner = &azureDiskProvisioner{}
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
var sku, location, account string
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 255)
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range a.options.Parameters {
switch strings.ToLower(k) {
case "skuname":
sku = v
case "location":
location = v
case "storageaccount":
account = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
}
}
// TODO: implement c.options.ProvisionerSelector parsing
if a.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
}
diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
if err != nil {
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: a.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
AccessModes: a.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskUri,
},
},
},
}
return pv, nil
}
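
Provision converts the claim's requested bytes to whole gibibytes with volume.RoundUpSize, a ceiling division by the allocation unit. A standalone sketch of the arithmetic (roundUpSize is a local stand-in assumed to match that behavior):

package main

import "fmt"

// roundUpSize performs the ceiling division used to compute requestGB above:
// any partial allocation unit rounds up to a whole one.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024
	// A hypothetical 5 GB (decimal) claim becomes a 5 GiB disk request.
	fmt.Println(roundUpSize(5*1000*1000*1000, GiB)) // 5
}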

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go generated vendored Normal file

@@ -0,0 +1,117 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/exec"
)
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
}
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
// given a LUN, find the VHD device path like /dev/sdb
// VHD disks under sysfs are like /sys/bus/scsi/devices/3:0:1:0
// return empty string if no disk is found
func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:1:0
arr := strings.Split(name, ":")
if len(arr) < 4 {
continue
}
target, err := strconv.Atoi(arr[0])
if err != nil {
glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
continue
}
// as observed, targets 0-3 are used by OS disks. Skip them
if target > 3 {
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// found the matching LUN
// read vendor and model to ensure it is a VHD disk
vendor := path.Join(sys_path, name, "vendor")
model := path.Join(sys_path, name, "model")
out, err := exe.Command("cat", vendor, model).CombinedOutput()
if err != nil {
glog.Errorf("failed to cat device vendor and model, err: %v", err)
continue
}
matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out)))
if err != nil || !matched {
glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err)
continue
}
// find it!
dir := path.Join(sys_path, name, "block")
dev, err := io.ReadDir(dir)
if err != nil {
glog.Errorf("failed to read %s", dir)
} else {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Errorf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Errorf("failed to read %s, err %v", scsi_path, err)
}
}
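
findDiskByLun keys off the sysfs entry name, which encodes a host:channel:target:lun SCSI address; the code reads arr[0] (called the target) and arr[3] (the LUN). A self-contained sketch of that parsing (parseSCSIAddress is a hypothetical helper mirroring the loop above):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSCSIAddress splits a sysfs name like "3:0:1:0" the same way
// findDiskByLun does: field 0 is treated as the target, field 3 as the LUN.
func parseSCSIAddress(name string) (target, lun int, ok bool) {
	arr := strings.Split(name, ":")
	if len(arr) < 4 {
		return 0, 0, false
	}
	t, err1 := strconv.Atoi(arr[0])
	l, err2 := strconv.Atoi(arr[3])
	if err1 != nil || err2 != nil {
		return 0, 0, false
	}
	return t, l, true
}

func main() {
	t, l, ok := parseSCSIAddress("3:0:1:0") // example path from the comment above
	fmt.Println(t, l, ok)                   // 3 0 true
}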

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util_test.go generated vendored Normal file

@@ -0,0 +1,116 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/exec"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
var (
lun = 1
lunStr = "1"
diskPath = "4:0:0:" + lunStr
devName = "sda"
)
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/bus/scsi/devices":
f1 := &fakeFileInfo{
name: "3:0:0:1",
}
f2 := &fakeFileInfo{
name: "4:0:0:0",
}
f3 := &fakeFileInfo{
name: diskPath,
}
f4 := &fakeFileInfo{
name: "host1",
}
f5 := &fakeFileInfo{
name: "target2:0:0",
}
return []os.FileInfo{f1, f2, f3, f4, f5}, nil
case "/sys/bus/scsi/devices/" + diskPath + "/block":
n := &fakeFileInfo{
name: devName,
}
return []os.FileInfo{n}, nil
}
return nil, fmt.Errorf("bad dir")
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func TestIoHandler(t *testing.T) {
fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// cat
func() ([]byte, error) {
return []byte("Msft \nVirtual Disk \n"), nil
},
},
}
fake := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
disk, err := findDiskByLun(lun, &fakeIOHandler{}, &fake)
// the fake sysfs tree and scripted cat output above should resolve LUN 1 to /dev/sda
if disk != "/dev/"+devName || err != nil {
t.Errorf("no data disk disk found: disk %v err %v", disk, err)
}
}