forked from barak/tarpoon

Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion


@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["attach_detach_controller.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/populator:go_default_library",
"//pkg/controller/volume/attachdetach/reconciler:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["attach_detach_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/controller/informers:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
],
)


@@ -0,0 +1,2 @@
assignees:
- saad-ali

File diff suppressed because it is too large.


@@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package attachdetach
import (
"testing"
"time"
"k8s.io/kubernetes/pkg/controller/informers"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
)
func Test_NewAttachDetachController_Positive(t *testing.T) {
// Arrange
fakeKubeClient := controllervolumetesting.CreateTestClient()
resyncPeriod := 5 * time.Minute
podInformer := informers.NewPodInformer(fakeKubeClient, resyncPeriod)
nodeInformer := informers.NewNodeInformer(fakeKubeClient, resyncPeriod)
pvcInformer := informers.NewPVCInformer(fakeKubeClient, resyncPeriod)
pvInformer := informers.NewPVInformer(fakeKubeClient, resyncPeriod)
// Act
_, err := NewAttachDetachController(
fakeKubeClient,
podInformer,
nodeInformer,
pvcInformer,
pvInformer,
nil, /* cloud */
nil /* plugins */)
// Assert
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
}
}


@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"actual_state_of_world.go",
"desired_state_of_world.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/types:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"actual_state_of_world_test.go",
"desired_state_of_world_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/types:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
],
)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,363 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/api/v1"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. The two keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
// If the node already exists this is a no-op.
AddNode(nodeName k8stypes.NodeName)
// AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If the pod already exists under the specified volume, this is a no-op.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the list of volumes that
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in the list of nodes managed by
// the attach/detach controller, an error is returned.
AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
// If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node.
// If no pod exists in the list of pods that reference the specified volume
// and are scheduled to the specified node, this is a no-op.
// If a node with the name nodeName does not exist in the list of nodes
// managed by the attach/detach controller, this is a no-op.
// If no volume with the name volumeName exists in the list of managed
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
// state of the world.
GetVolumesToAttach() []VolumeToAttach
// GetPodToAdd generates and returns a map of pods based on the current desired
// state of the world.
GetPodToAdd() map[types.UniquePodName]PodToAdd
}
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
operationexecutor.VolumeToAttach
}
// PodToAdd represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type PodToAdd struct {
// pod contains the api object of pod
Pod *v1.Pod
// volumeName contains the unique identifier for this volume.
VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node.
NodeName k8stypes.NodeName
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node.
nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
// nodeName contains the name of this node.
nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volumeToAttach object containing more information about the volume.
volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
}
// The volume object represents a volume that should be attached to a node.
type volumeToAttach struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[types.UniquePodName]pod
}
// The pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the unique identifier for this pod
podName types.UniquePodName
// pod object contains the api object of pod
podObj *v1.Pod
}
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName,
podToAdd *v1.Pod,
volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return "", fmt.Errorf(
"no node with the name %q exists in the list of managed nodes",
nodeName)
}
attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
volumeObj = volumeToAttach{
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] =
pod{
podName: podName,
podObj: podToAdd,
}
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return nil
}
if len(nodeObj.volumesToAttach) > 0 {
return fmt.Errorf(
"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
nodeName,
len(nodeObj.volumesToAttach))
}
delete(
dsw.nodesManaged,
nodeName)
return nil
}
func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
return
}
delete(
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods,
podName)
if len(volumeObj.scheduledPods) == 0 {
delete(
dsw.nodesManaged[nodeName].volumesToAttach,
volumeName)
}
}
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, nodeExists := dsw.nodesManaged[nodeName]
return nodeExists
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
return true
}
}
return false
}
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
dsw.RLock()
defer dsw.RUnlock()
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName,
ScheduledPods: getPodsFromMap(volumeObj.scheduledPods),
}})
}
}
return volumesToAttach
}
// Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap {
pods = append(pods, pod.podObj)
}
return pods
}
func (dsw *desiredStateOfWorld) GetPodToAdd() map[types.UniquePodName]PodToAdd {
dsw.RLock()
defer dsw.RUnlock()
pods := make(map[types.UniquePodName]PodToAdd)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
for podUID, pod := range volumeObj.scheduledPods {
pods[podUID] = PodToAdd{
Pod: pod.podObj,
VolumeName: volumeName,
NodeName: nodeName,
}
}
}
}
return pods
}
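
The interface above is easiest to read alongside a concrete caller. The following is an illustrative sketch, not part of this commit, showing how a caller might populate the cache and read back the resulting attach work; the volumePluginMgr, podObj, and volumeSpec values are assumed to be supplied by informers or a test fixture.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
	k8stypes "k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/types"
)

// populateDesiredState registers a node, adds a pod that references an
// attachable volume, reads back the resulting attach work, and finally
// removes the pod again. All inputs are assumed to come from the caller.
func populateDesiredState(
	volumePluginMgr *volume.VolumePluginMgr,
	podObj *v1.Pod,
	volumeSpec *volume.Spec) error {
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)

	nodeName := k8stypes.NodeName("node-1")
	podName := types.UniquePodName("pod-uid-1")

	// Nodes must exist in the cache before pods can be added beneath them.
	dsw.AddNode(nodeName)

	// AddPod implicitly adds the volume under the node and returns its
	// generated unique name.
	volumeName, err := dsw.AddPod(podName, podObj, volumeSpec, nodeName)
	if err != nil {
		return err // volume not attachable, or node not managed
	}

	// The reconciler consumes this view to decide which attaches to trigger.
	_ = dsw.GetVolumesToAttach()

	// Deleting the last pod under a volume also removes the volume entry.
	dsw.DeletePod(podName, volumeName, nodeName)
	return nil
}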

File diff suppressed because it is too large.


@@ -0,0 +1,25 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["desired_state_of_world_populator.go"],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)


@@ -0,0 +1,125 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package populator implements interfaces that monitor and keep the states of the
// desired_state_of_world in sync with the "ground truth" from the informer.
package populator
import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorldPopulator periodically verifies that the pods in the
// desired state of the world still exist and, if not, removes them.
// TODO: it also loops through the list of active pods and ensures that
// each one exists in the desired state of the world cache
// if it has volumes.
type DesiredStateOfWorldPopulator interface {
Run(stopCh <-chan struct{})
}
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator.
// loopSleepDuration - the amount of time the populator loop sleeps between
// successive executions
// podInformer - the shared pod informer that is the source of truth for the
// pods that exist in the cluster
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
loopSleepDuration time.Duration,
podInformer kcache.SharedInformer,
desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
loopSleepDuration: loopSleepDuration,
podInformer: podInformer,
desiredStateOfWorld: desiredStateOfWorld,
}
}
type desiredStateOfWorldPopulator struct {
loopSleepDuration time.Duration
podInformer kcache.SharedInformer
desiredStateOfWorld cache.DesiredStateOfWorld
}
func (dswp *desiredStateOfWorldPopulator) Run(stopCh <-chan struct{}) {
wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
}
func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
return func() {
dswp.findAndRemoveDeletedPods()
}
}
// Iterate through all pods in the desired state of the world and remove those
// that no longer exist in the informer.
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
if err != nil {
glog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
continue
}
// Retrieve the pod object from pod informer with the namespace key
informerPodObj, exists, err :=
dswp.podInformer.GetStore().GetByKey(dswPodKey)
if err != nil {
glog.Errorf(
"podInformer GetByKey failed for pod %q (UID %q) with %v",
dswPodKey,
dswPodUID,
err)
continue
}
if exists && informerPodObj == nil {
glog.Infof(
"podInformer GetByKey found pod, but informerPodObj is nil for pod %q (UID %q)",
dswPodKey,
dswPodUID)
continue
}
if exists {
informerPod, ok := informerPodObj.(*v1.Pod)
if !ok {
glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID)
continue
}
informerPodUID := volumehelper.GetUniquePodName(informerPod)
// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
if informerPodUID == dswPodUID {
glog.V(10).Infof(
"Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
continue
}
}
// The pod from dsw does not exist in the pod informer, or it does not match the unique identifier
// retrieved from the informer, so delete it from dsw.
glog.V(1).Infof(
"Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
}
}
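
As a usage illustration (not part of this commit), the populator only needs a shared pod informer, the desired-state cache, and a stop channel; everything else is internal to the loop above. The helper name and the sleep period below are assumptions chosen for the example.

package example

import (
	"time"

	kcache "k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
)

// runPopulator starts the pruning loop that removes pods from the desired
// state once they disappear from the pod informer. It blocks until stopCh
// is closed.
func runPopulator(
	podInformer kcache.SharedInformer,
	dsw cache.DesiredStateOfWorld,
	stopCh <-chan struct{}) {
	dswp := populator.NewDesiredStateOfWorldPopulator(
		1*time.Minute, // loopSleepDuration between pruning passes
		podInformer,
		dsw)
	dswp.Run(stopCh)
}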


@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["reconciler.go"],
tags = ["automanaged"],
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume/util/nestedpendingoperations:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["reconciler_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
],
)


@@ -0,0 +1,224 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// actions.
package reconciler
import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering attach/detach operations.
// Note: This is distinct from the Reconciler implemented by the kubelet volume
// manager. This reconciles state for the attach/detach controller. That
// reconciles state for the kubelet volume manager.
type Reconciler interface {
// Run starts the reconciliation loop, which executes periodically and checks
// whether volumes that should be attached are attached and volumes that
// should be detached are detached. If not, it triggers attach/detach
// operations to rectify the state.
Run(stopCh <-chan struct{})
}
// NewReconciler returns a new instance of Reconciler that waits loopPeriod
// between successive executions.
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions.
// maxWaitForUnmountDuration is the max amount of time the reconciler will wait
// for the volume to be safely unmounted, after this it will detach the volume
// anyway (to handle crashed/unavailable nodes). If during this time the volume
// becomes used by a new pod, the detach request will be aborted and the timer
// cleared.
func NewReconciler(
loopPeriod time.Duration,
maxWaitForUnmountDuration time.Duration,
syncDuration time.Duration,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
attacherDetacher operationexecutor.OperationExecutor,
nodeStatusUpdater statusupdater.NodeStatusUpdater) Reconciler {
return &reconciler{
loopPeriod: loopPeriod,
maxWaitForUnmountDuration: maxWaitForUnmountDuration,
syncDuration: syncDuration,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
attacherDetacher: attacherDetacher,
nodeStatusUpdater: nodeStatusUpdater,
timeOfLastSync: time.Now(),
}
}
type reconciler struct {
loopPeriod time.Duration
maxWaitForUnmountDuration time.Duration
syncDuration time.Duration
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
attacherDetacher operationexecutor.OperationExecutor
nodeStatusUpdater statusupdater.NodeStatusUpdater
timeOfLastSync time.Time
}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
}
func (rc *reconciler) reconciliationLoopFunc() func() {
return func() {
rc.reconcile()
// reconciler periodically checks whether the attached volumes from actual state
// are still attached to the node and updates the status if they are not.
if time.Since(rc.timeOfLastSync) > rc.syncDuration {
rc.sync()
}
}
}
func (rc *reconciler) sync() {
defer rc.updateSyncTime()
rc.syncStates()
}
func (rc *reconciler) updateSyncTime() {
rc.timeOfLastSync = time.Now()
}
func (rc *reconciler) syncStates() {
volumesPerNode := rc.actualStateOfWorld.GetAttachedVolumesPerNode()
for nodeName, volumes := range volumesPerNode {
err := rc.attacherDetacher.VerifyVolumesAreAttached(volumes, nodeName, rc.actualStateOfWorld)
if err != nil {
glog.Errorf("Error in syncing states for volumes: %v", err)
}
}
}
func (rc *reconciler) reconcile() {
// Detaches are triggered before attaches so that volumes referenced by
// pods that are rescheduled to a different node are detached first.
// Ensure volumes that should be detached are detached.
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) {
// Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
continue
}
// Check whether the elapsed time has exceeded the maximum waiting time
timeout := elapsedTime > rc.maxWaitForUnmountDuration
// Check whether the volume is still mounted. Skip detach if it is still mounted, unless the timeout has expired
if attachedVolume.MountedByNode && !timeout {
glog.V(12).Infof("Cannot trigger detach for volume %q on node %q because volume is still mounted",
attachedVolume.VolumeName,
attachedVolume.NodeName)
continue
}
// Before triggering volume detach, mark volume as detached and update the node status
// If it fails to update node status, skip detach volume
rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
// Update Node Status to indicate volume is no longer safe to mount.
err = rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
// Skip detaching this volume if unable to update node status
glog.Errorf("UpdateNodeStatuses failed while attempting to report volume %q as attached to node %q with: %v ",
attachedVolume.VolumeName,
attachedVolume.NodeName,
err)
continue
}
// Trigger detach volume which requires verifying the safe-to-detach step
// If timeout is true, skip verifySafeToDetach check
glog.V(5).Infof("Attempting to start DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
verifySafeToDetach := !timeout
err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
if err == nil {
if !timeout {
glog.Infof("Started DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
} else {
glog.Infof("Started DetachVolume for volume %q from node %q. This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching",
attachedVolume.VolumeName,
attachedVolume.NodeName,
rc.maxWaitForUnmountDuration)
}
}
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
// Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(
"operationExecutor.DetachVolume failed to start for volume %q (spec.Name: %q) from node %q with err: %v",
attachedVolume.VolumeName,
attachedVolume.VolumeSpec.Name(),
attachedVolume.NodeName,
err)
}
}
}
// Ensure volumes that should be attached are attached.
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if rc.actualStateOfWorld.VolumeNodeExists(
volumeToAttach.VolumeName, volumeToAttach.NodeName) {
// Volume/Node exists, touch it to reset detachRequestedTime
glog.V(5).Infof("Volume %q/Node %q is attached--touching.", volumeToAttach.VolumeName, volumeToAttach.NodeName)
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
} else {
// Volume/Node doesn't exist, spawn a goroutine to attach it
glog.V(5).Infof("Attempting to start AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
// Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(
"operationExecutor.AttachVolume failed to start for volume %q (spec.Name: %q) to node %q with err: %v",
volumeToAttach.VolumeName,
volumeToAttach.VolumeSpec.Name(),
volumeToAttach.NodeName,
err)
}
}
}
// Update Node Status
err := rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
glog.Infof("UpdateNodeStatuses failed with: %v", err)
}
}
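
For orientation, the sketch below (not part of this commit) shows how the reconciler is assembled from the two caches, the operation executor, and the node status updater, and then started; the periods are example values, not the controller's defaults, and the collaborators are assumed to be constructed elsewhere, as in the controller's constructor.

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)

// startReconciler wires the reconciler to its collaborators and runs it in a
// goroutine until stopCh is closed.
func startReconciler(
	dsw cache.DesiredStateOfWorld,
	asw cache.ActualStateOfWorld,
	attacherDetacher operationexecutor.OperationExecutor,
	nsu statusupdater.NodeStatusUpdater,
	stopCh <-chan struct{}) {
	rc := reconciler.NewReconciler(
		100*time.Millisecond, // loopPeriod between reconcile passes
		6*time.Minute,        // maxWaitForUnmountDuration before force detach
		1*time.Minute,        // syncDuration between full state syncs
		dsw,
		asw,
		attacherDetacher,
		nsu)
	go rc.Run(stopCh)
}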


@@ -0,0 +1,478 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
reconcilerLoopPeriod time.Duration = 0 * time.Millisecond
syncLoopPeriod time.Duration = 100 * time.Minute
maxWaitForUnmountDuration time.Duration = 50 * time.Millisecond
resyncPeriod time.Duration = 5 * time.Minute
)
// Calls Run()
// Verifies there are no calls to attach or detach.
func Test_Run_Positive_DoNothing(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
fakeRecorder := &record.FakeRecorder{}
ad := operationexecutor.NewOperationExecutor(
fakeKubeClient, volumePluginMgr, fakeRecorder, false)
nodeInformer := informers.NewNodeInformer(
fakeKubeClient, resyncPeriod)
nsu := statusupdater.NewNodeStatusUpdater(
fakeKubeClient, nodeInformer, asw)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
// Assert
waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, true /* expectZeroNewAttacherCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 0 /* expectedAttachCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}
// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
fakeRecorder := &record.FakeRecorder{}
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr, fakeRecorder, false)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists {
t.Fatalf(
"Volume %q/node %q should not exist, but it does.",
volumeName,
nodeName)
}
_, podErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podErr != nil {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podErr)
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
}
// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache.
// Verifies there is one detach call and no (new) attach calls.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
fakeRecorder := &record.FakeRecorder{}
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr, fakeRecorder, false)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists {
t.Fatalf(
"Volume %q/node %q should not exist, but it does.",
volumeName,
nodeName)
}
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
// Act
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
if volumeExists {
t.Fatalf(
"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
podName,
generatedVolumeName,
nodeName)
}
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
// Assert
waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}
// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache without first marking the node/volume as unmounted.
// Verifies there is one detach call and no (new) attach calls.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
fakeRecorder := &record.FakeRecorder{}
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr, fakeRecorder, false)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists {
t.Fatalf(
"Volume %q/node %q should not exist, but it does.",
volumeName,
nodeName)
}
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
// Act
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
if volumeExists {
t.Fatalf(
"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
podName,
generatedVolumeName,
nodeName)
}
// Assert -- Timer will trigger detach
waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}
// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Has node update fail
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache.
// Verifies there are NO detach call and no (new) attach calls.
func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdateStatusFail(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
fakeRecorder := &record.FakeRecorder{}
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr, fakeRecorder, false)
nsu := statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists {
t.Fatalf(
"Volume %q/node %q should not exist, but it does.",
volumeName,
nodeName)
}
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
}
// Act
go reconciler.Run(wait.NeverStop)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
// Act
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
if volumeExists {
t.Fatalf(
"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
podName,
generatedVolumeName,
nodeName)
}
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
// Assert
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}
func waitForNewAttacherCallCount(
t *testing.T,
expectedCallCount int,
fakePlugin *volumetesting.FakeVolumePlugin) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
actualCallCount := fakePlugin.GetNewAttacherCallCount()
if actualCallCount >= expectedCallCount {
return true, nil
}
t.Logf(
"Warning: Wrong NewAttacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
expectedCallCount,
actualCallCount)
return false, nil
},
)
if err != nil {
t.Fatalf(
"Timed out waiting for NewAttacherCallCount. Expected: <%v> Actual: <%v>",
expectedCallCount,
fakePlugin.GetNewAttacherCallCount())
}
}
func waitForNewDetacherCallCount(
t *testing.T,
expectedCallCount int,
fakePlugin *volumetesting.FakeVolumePlugin) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
actualCallCount := fakePlugin.GetNewDetacherCallCount()
if actualCallCount >= expectedCallCount {
return true, nil
}
t.Logf(
"Warning: Wrong NewDetacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
expectedCallCount,
actualCallCount)
return false, nil
},
)
if err != nil {
t.Fatalf(
"Timed out waiting for NewDetacherCallCount. Expected: <%v> Actual: <%v>",
expectedCallCount,
fakePlugin.GetNewDetacherCallCount())
}
}
func waitForAttachCallCount(
t *testing.T,
expectedAttachCallCount int,
fakePlugin *volumetesting.FakeVolumePlugin) {
if len(fakePlugin.GetAttachers()) == 0 && expectedAttachCallCount == 0 {
return
}
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
for i, attacher := range fakePlugin.GetAttachers() {
actualCallCount := attacher.GetAttachCallCount()
if actualCallCount == expectedAttachCallCount {
return true, nil
}
t.Logf(
"Warning: Wrong attacher[%v].GetAttachCallCount(). Expected: <%v> Actual: <%v>. Will try next attacher.",
i,
expectedAttachCallCount,
actualCallCount)
}
t.Logf(
"Warning: No attachers have expected AttachCallCount. Expected: <%v>. Will retry.",
expectedAttachCallCount)
return false, nil
},
)
if err != nil {
t.Fatalf(
"No attachers have expected AttachCallCount. Expected: <%v>",
expectedAttachCallCount)
}
}
func waitForDetachCallCount(
t *testing.T,
expectedDetachCallCount int,
fakePlugin *volumetesting.FakeVolumePlugin) {
if len(fakePlugin.GetDetachers()) == 0 && expectedDetachCallCount == 0 {
return
}
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
for i, detacher := range fakePlugin.GetDetachers() {
actualCallCount := detacher.GetDetachCallCount()
if actualCallCount == expectedDetachCallCount {
return true, nil
}
t.Logf(
"Wrong detacher[%v].GetDetachCallCount(). Expected: <%v> Actual: <%v>. Will try next detacher.",
i,
expectedDetachCallCount,
actualCallCount)
}
t.Logf(
"Warning: No detachers have expected DetachCallCount. Expected: <%v>. Will retry.",
expectedDetachCallCount)
return false, nil
},
)
if err != nil {
t.Fatalf(
"No detachers have expected DetachCallCount. Expected: <%v>",
expectedDetachCallCount)
}
}
func verifyNewAttacherCallCount(
t *testing.T,
expectZeroNewAttacherCallCount bool,
fakePlugin *volumetesting.FakeVolumePlugin) {
if expectZeroNewAttacherCallCount &&
fakePlugin.GetNewAttacherCallCount() != 0 {
t.Fatalf(
"Wrong NewAttacherCallCount. Expected: <0> Actual: <%v>",
fakePlugin.GetNewAttacherCallCount())
}
}
func verifyNewDetacherCallCount(
t *testing.T,
expectZeroNewDetacherCallCount bool,
fakePlugin *volumetesting.FakeVolumePlugin) {
if expectZeroNewDetacherCallCount &&
fakePlugin.GetNewDetacherCallCount() != 0 {
t.Fatalf("Wrong NewDetacherCallCount. Expected: <0> Actual: <%v>",
fakePlugin.GetNewDetacherCallCount())
}
}
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 6,
}
return wait.ExponentialBackoff(backoff, fn)
}


@@ -0,0 +1,29 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"fake_node_status_updater.go",
"node_status_updater.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/util/strategicpatch:go_default_library",
"//vendor:github.com/golang/glog",
],
)


@@ -0,0 +1,39 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusupdater
import (
"fmt"
)
func NewFakeNodeStatusUpdater(returnError bool) NodeStatusUpdater {
return &fakeNodeStatusUpdater{
returnError: returnError,
}
}
type fakeNodeStatusUpdater struct {
returnError bool
}
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
if fnsu.returnError {
return fmt.Errorf("fake error on update node status")
}
return nil
}


@@ -0,0 +1,139 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package statusupdater implements interfaces that enable updating the status
// of API objects.
package statusupdater
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/util/strategicpatch"
)
// NodeStatusUpdater defines a set of operations for updating the
// VolumesAttached field in the Node Status.
type NodeStatusUpdater interface {
// Gets a list of node statuses that should be updated from the actual state
// of the world and updates them.
UpdateNodeStatuses() error
}
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater(
kubeClient clientset.Interface,
nodeInformer kcache.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{
actualStateOfWorld: actualStateOfWorld,
nodeInformer: nodeInformer,
kubeClient: kubeClient,
}
}
type nodeStatusUpdater struct {
kubeClient clientset.Interface
nodeInformer kcache.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld
}
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
// TODO: investigate right behavior if nodeName is empty
// kubernetes/kubernetes/issues/37777
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
for nodeName, attachedVolumes := range nodesToUpdate {
nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))
if nodeObj == nil || !exists || err != nil {
// If node does not exist, its status cannot be updated, log error and
// reset flag statusUpdateNeeded back to true to indicate this node status
// needs to be updated again
glog.V(2).Infof(
"Could not update node status. Failed to find node %q in NodeInformer cache. %v",
nodeName,
err)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
continue
}
clonedNode, err := conversion.NewCloner().DeepCopy(nodeObj)
if err != nil {
return fmt.Errorf("error cloning node %q: %v",
nodeName,
err)
}
node, ok := clonedNode.(*v1.Node)
if !ok || node == nil {
return fmt.Errorf(
"failed to cast %q object %#v to Node",
nodeName,
clonedNode)
}
oldData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf(
"failed to Marshal oldData for node %q. %v",
nodeName,
err)
}
node.Status.VolumesAttached = attachedVolumes
newData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf(
"failed to Marshal newData for node %q. %v",
nodeName,
err)
}
patchBytes, err :=
strategicpatch.CreateStrategicMergePatch(oldData, newData, node)
if err != nil {
return fmt.Errorf(
"failed to CreateStrategicMergePatch for node %q. %v",
nodeName,
err)
}
_, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)
if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be updated again
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
return fmt.Errorf(
"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
nodeName,
err)
}
glog.V(2).Infof(
"Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v",
nodeName,
string(patchBytes),
node.Status.VolumesAttached)
}
return nil
}
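
The core of UpdateNodeStatuses is the marshal-old/mutate/marshal-new/diff pattern around strategicpatch.CreateStrategicMergePatch. The sketch below (not part of this commit) isolates that pattern for a single node; the node value and the attached-volume list are assumed inputs, and error handling is reduced to returning the error.

package example

import (
	"encoding/json"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/strategicpatch"
)

// volumesAttachedPatch returns the strategic merge patch that updates
// node.Status.VolumesAttached to the given list, mirroring the steps taken
// by UpdateNodeStatuses above.
func volumesAttachedPatch(node *v1.Node, attached []v1.AttachedVolume) ([]byte, error) {
	oldData, err := json.Marshal(node)
	if err != nil {
		return nil, err
	}

	node.Status.VolumesAttached = attached

	newData, err := json.Marshal(node)
	if err != nil {
		return nil, err
	}

	// The node value lets the patcher consult the struct's patch strategy
	// tags when computing the diff between old and new.
	return strategicpatch.CreateStrategicMergePatch(oldData, newData, node)
}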


@@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["testvolumespec.go"],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/watch:go_default_library",
],
)


@@ -0,0 +1,115 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
)
// GetTestVolumeSpec returns a test volume spec
func GetTestVolumeSpec(volumeName string, diskName v1.UniqueVolumeName) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: string(diskName),
FSType: "fake",
ReadOnly: false,
},
},
},
}
}
func CreateTestClient() *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
podNamePrefix := "mypod"
namespace := "mynamespace"
for i := 0; i < 5; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: map[string]string{
"name": podName,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerName",
Image: "containerImage",
VolumeMounts: []v1.VolumeMount{
{
Name: "volumeMountName",
ReadOnly: false,
MountPath: "/mnt",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "volumeName",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pdName",
FSType: "ext4",
ReadOnly: false,
},
},
},
},
},
}
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
return fakeClient
}
// NewPod returns a test pod object
func NewPod(uid, name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: types.UID(uid),
Name: name,
Namespace: name,
},
}
}


@@ -0,0 +1,81 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"index.go",
"pv_controller.go",
"pv_controller_base.go",
"volume_host.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"binder_test.go",
"delete_test.go",
"framework_test.go",
"index_test.go",
"provision_test.go",
"pv_controller_test.go",
"recycle_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/testing/cache:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/diff:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//vendor:github.com/golang/glog",
],
)

View file

@ -0,0 +1,4 @@
assignees:
- jsafrane
- saad-ali
- thockin

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,228 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)
// Test single call to syncVolume, expecting deletion to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestDeleteSync(t *testing.T) {
tests := []controllerTest{
{
// delete volume bound by controller
"8-1 - successful delete",
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete volume bound by user
"8-2 - successful delete with prebound volume",
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete failure - plugin not found
"8-3 - plugin not found",
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
},
{
// delete failure - newDeleter returns error
"8-4 - newDeleter returns error",
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume),
},
{
// delete failure - delete() returns error
"8-5 - delete returns error",
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
},
{
// delete success(?) - volume is deleted before doDelete() starts
"8-6 - volume is deleted before deleting",
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before delete operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume8-6")
reactor.lock.Unlock()
}),
},
{
// delete success(?) - volume is bound just at the time doDelete()
// starts. This simulates "volume no longer needs deletion,
// skipping".
"8-7 - volume is bound before deleting",
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
// Bind the volume to resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound)
reactor.claims[claim.Name] = claim
ctrl.claims.Add(claim)
volume := reactor.volumes["volume8-7"]
volume.Status.Phase = v1.VolumeBound
}),
},
{
// delete success - volume bound by user is deleted, while a new
// claim is created with another UID.
"8-9 - prebound volume is deleted while the claim exists",
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// PV requires external deleter
"8-10 - external deleter",
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims,
noclaims,
noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation
test.initialVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
test.expectedVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
return testSyncVolume(ctrl, reactor, test)
},
},
{
// delete success - two PVs are provisioned for a single claim.
// One of the PVs is deleted.
"8-11 - two PVs provisioned for a single claim",
[]*v1.PersistentVolume{
newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
[]*v1.PersistentVolume{
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
// the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete success - two PVs are externally provisioned for a single
// claim. One of the PVs is marked as Released to be deleted by the
// external provisioner.
"8-12 - two PVs externally provisioned for a single claim",
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
// the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released"
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation
test.initialVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
test.expectedVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
return testSyncVolume(ctrl, reactor, test)
},
},
}
runSyncTests(t, tests, []*storage.StorageClass{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops (a generic sketch of this loop follows the test below).
func TestDeleteMultiSync(t *testing.T) {
tests := []controllerTest{
{
// delete failure - delete returns error. The controller should
// try again.
"9-1 - delete returns error",
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
}
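
The numbered steps above (repeated later before TestProvisionMultiSync) describe re-syncing changed volumes/claims until a fixpoint, with a periodic full sync and a capped number of iterations. A generic, hedged sketch of that loop is below; the real implementation (runMultisyncTests) is not shown in this diff, so the callback names and shapes here are assumptions:

package persistentvolume

import "fmt"

// maxSyncIterations caps the simulation, matching the "limit of calls" note above.
const maxSyncIterations = 100

func simulateMultiSync(changedObjects func() []string, syncOne func(name string) bool, syncAll func() bool) error {
	for i := 0; i < maxSyncIterations; i++ {
		// Step 2: re-sync every volume/claim changed by the previous round.
		progressed := false
		for _, name := range changedObjects() {
			if syncOne(name) {
				progressed = true
			}
		}
		if progressed {
			continue // something changed; repeat step 2
		}
		// Step 3: periodic sync of everything; step 4 loops back if anything changed.
		if !syncAll() {
			return nil // step 5: stable state reached; caller compares with expected objects
		}
	}
	return fmt.Errorf("objects did not reach a stable state within %d iterations", maxSyncIterations)
}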

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,282 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"sort"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/labels"
)
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes
// indexed by AccessModes and ordered by storage capacity.
type persistentVolumeOrderedIndex struct {
store cache.Indexer
}
func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex {
return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})}
}
// accessModesIndexFunc is an indexing function that returns a persistent
// volume's AccessModes as a string
func accessModesIndexFunc(obj interface{}) ([]string, error) {
if pv, ok := obj.(*v1.PersistentVolume); ok {
modes := v1.GetAccessModesAsString(pv.Spec.AccessModes)
return []string{modes}, nil
}
return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj)
}
// listByAccessModes returns all volumes with the given set of
// AccessModeTypes. The list is unsorted!
func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []v1.PersistentVolumeAccessMode) ([]*v1.PersistentVolume, error) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
AccessModes: modes,
},
}
objs, err := pvIndex.store.Index("accessmodes", pv)
if err != nil {
return nil, err
}
volumes := make([]*v1.PersistentVolume, len(objs))
for i, obj := range objs {
volumes[i] = obj.(*v1.PersistentVolume)
}
return volumes, nil
}
// matchPredicate is a function that indicates that a persistent volume matches another
type matchPredicate func(compareThis, toThis *v1.PersistentVolume) bool
// findByClaim returns the best matching PV from the ordered list, or nil if no match is found
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVolumeClaim, matchPredicate matchPredicate) (*v1.PersistentVolume, error) {
// PVs are indexed by their access modes to allow easier searching. Each
// index is the string representation of a set of access modes. There is a
// finite number of possible sets and PVs will only be indexed in one of
// them (whichever index matches the PV's modes).
//
// A request for resources will always specify its desired access modes.
// Any matching PV must offer at least the requested modes, but it
// can offer more. For example, a user asks for ReadWriteOnce but a GCEPD
// is available, which is ReadWriteOnce+ReadOnlyMany.
//
// Searches are performed against a set of access modes, so we can attempt
// not only the exact matching modes but also potential matches (the GCEPD
// example above).
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
var smallestVolume *v1.PersistentVolume
var smallestVolumeSize int64
requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestedSize := requestedQty.Value()
requestedClass := storageutil.GetClaimStorageClass(claim)
var selector labels.Selector
if claim.Spec.Selector != nil {
internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector)
if err != nil {
// should be unreachable code due to validation
return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err)
}
selector = internalSelector
}
for _, modes := range allPossibleModes {
volumes, err := pvIndex.listByAccessModes(modes)
if err != nil {
return nil, err
}
// Go through all available volumes with two goals:
// - find a volume that is either pre-bound by user or dynamically
// provisioned for this claim. Because of this we need to loop through
// all volumes.
// - find the smallest matching one if there is no volume pre-bound to
// the claim.
for _, volume := range volumes {
if isVolumeBoundToClaim(volume, claim) {
// this claim and volume are pre-bound; return
// the volume if the size request is satisfied,
// otherwise continue searching for a match
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
volumeSize := volumeQty.Value()
if volumeSize < requestedSize {
continue
}
return volume, nil
}
// In Alpha dynamic provisioning, we do not want to match claims
// with existing PVs; findByClaim must find only PVs that are
// pre-bound to the claim (by dynamic provisioning). TODO: remove in
// 1.5
if v1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation) {
continue
}
// filter out:
// - volumes bound to another claim
// - volumes whose labels don't match the claim's selector, if specified
// - volumes in Class that is not requested
if volume.Spec.ClaimRef != nil {
continue
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
continue
}
if storageutil.GetVolumeStorageClass(volume) != requestedClass {
continue
}
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
volumeSize := volumeQty.Value()
if volumeSize >= requestedSize {
if smallestVolume == nil || smallestVolumeSize > volumeSize {
smallestVolume = volume
smallestVolumeSize = volumeSize
}
}
}
if smallestVolume != nil {
// Found a matching volume
return smallestVolume, nil
}
}
return nil, nil
}
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and its storage request
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
return pvIndex.findByClaim(claim, matchStorageCapacity)
}
// matchStorageCapacity is a matchPredicate used to sort and find volumes
func matchStorageCapacity(pvA, pvB *v1.PersistentVolume) bool {
aQty := pvA.Spec.Capacity[v1.ResourceStorage]
bQty := pvB.Spec.Capacity[v1.ResourceStorage]
aSize := aQty.Value()
bSize := bQty.Value()
return aSize <= bSize
}
// allPossibleMatchingAccessModes returns an array of AccessMode arrays that
// can satisfy a user's requested modes.
//
// See the comments in findByClaim above regarding indexing.
//
// allPossibleMatchingAccessModes gets all stringified accessmodes from the
// index and returns all those that contain at least all of the requested
// mode.
//
// For example, assume the index contains 2 types of PVs where the stringified
// accessmodes are:
//
// "RWO,ROX" -- some number of GCEPDs
// "RWO,ROX,RWX" -- some number of NFS volumes
//
// A request for RWO could be satisfied by both sets of indexed volumes, so
// allPossibleMatchingAccessModes returns:
//
// [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany,
// },
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// },
// }
//
// A request for RWX can be satisfied by only one set of indexed volumes, so
// the return is:
//
// [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// },
// }
//
// This func returns the matching mode sets in ascending order of size, so the
// caller sees the set closest to what was actually requested first.
func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []v1.PersistentVolumeAccessMode) [][]v1.PersistentVolumeAccessMode {
matchedModes := [][]v1.PersistentVolumeAccessMode{}
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
for _, key := range keys {
indexedModes := v1.GetAccessModesFromString(key)
if containedInAll(indexedModes, requestedModes) {
matchedModes = append(matchedModes, indexedModes)
}
}
// sort by the number of modes in each array with the fewest number of
// modes coming first. this allows searching for volumes by the minimum
// number of modes required of the possible matches.
sort.Sort(byAccessModes{matchedModes})
return matchedModes
}
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
func containedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !contains(indexedModes, mode) {
return false
}
}
return true
}
// byAccessModes is used to order access modes by size, with the fewest modes first
type byAccessModes struct {
modes [][]v1.PersistentVolumeAccessMode
}
func (c byAccessModes) Less(i, j int) bool {
return len(c.modes[i]) < len(c.modes[j])
}
func (c byAccessModes) Swap(i, j int) {
c.modes[i], c.modes[j] = c.modes[j], c.modes[i]
}
func (c byAccessModes) Len() int {
return len(c.modes)
}
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
func claimrefToClaimKey(claimref *v1.ObjectReference) string {
return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name)
}
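
Putting the pieces above together, findBestMatchForClaim returns the smallest unbound volume whose access modes, capacity, selector, and class satisfy the claim. A small illustrative sketch, assuming it lives in this package (the volume and claim literals are invented for the example):

package persistentvolume

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

// exampleSmallestMatch indexes two ReadWriteOnce volumes and asks for the best
// match for a 5Gi claim; the smaller volume that still fits should be returned.
func exampleSmallestMatch() error {
	index := newPersistentVolumeOrderedIndex()
	for name, size := range map[string]string{"pv-small": "5Gi", "pv-large": "50Gi"} {
		pv := &v1.PersistentVolume{
			ObjectMeta: v1.ObjectMeta{Name: name},
			Spec: v1.PersistentVolumeSpec{
				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
				Capacity: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse(size),
				},
			},
		}
		if err := index.store.Add(pv); err != nil {
			return err
		}
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{Name: "claim", Namespace: "ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("5Gi")},
			},
		},
	}
	match, err := index.findBestMatchForClaim(claim)
	if err != nil {
		return err
	}
	fmt.Println("matched:", match.Name) // expected: pv-small
	return nil
}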

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,18 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["options.go"],
tags = ["automanaged"],
deps = ["//vendor:github.com/spf13/pflag"],
)

View file

@ -0,0 +1,91 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"time"
"github.com/spf13/pflag"
)
// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
type VolumeConfigFlags struct {
PersistentVolumeRecyclerMaximumRetry int
PersistentVolumeRecyclerMinimumTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathNFS string
PersistentVolumeRecyclerIncrementTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathHostPath string
PersistentVolumeRecyclerMinimumTimeoutHostPath int
PersistentVolumeRecyclerIncrementTimeoutHostPath int
EnableHostPathProvisioning bool
EnableDynamicProvisioning bool
}
type PersistentVolumeControllerOptions struct {
PVClaimBinderSyncPeriod time.Duration
VolumeConfigFlags VolumeConfigFlags
}
func NewPersistentVolumeControllerOptions() PersistentVolumeControllerOptions {
return PersistentVolumeControllerOptions{
PVClaimBinderSyncPeriod: 15 * time.Second,
VolumeConfigFlags: VolumeConfigFlags{
// default values here
PersistentVolumeRecyclerMaximumRetry: 3,
PersistentVolumeRecyclerMinimumTimeoutNFS: 300,
PersistentVolumeRecyclerIncrementTimeoutNFS: 30,
PersistentVolumeRecyclerMinimumTimeoutHostPath: 60,
PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
EnableHostPathProvisioning: false,
EnableDynamicProvisioning: true,
},
}
}
func (o *PersistentVolumeControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&o.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", o.PVClaimBinderSyncPeriod,
"The period for syncing persistent volumes and persistent volume claims")
fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
"pv-recycler-pod-template-filepath-nfs", o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
"The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs",
o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs",
o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath,
"The file path to a pod definition used as a template for HostPath persistent volume recycling. "+
"This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
"The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
"the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. "+
"This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry, "pv-recycler-maximum-retry",
o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
"Maximum number of attempts to recycle or delete a persistent volume")
fs.BoolVar(&o.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", o.VolumeConfigFlags.EnableHostPathProvisioning,
"Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. "+
"HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.BoolVar(&o.VolumeConfigFlags.EnableDynamicProvisioning, "enable-dynamic-provisioning", o.VolumeConfigFlags.EnableDynamicProvisioning,
"Enable dynamic provisioning for environments that support it.")
}
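
A hedged sketch of how these options would typically be wired to a command line: construct the defaults, register the flags on a pflag FlagSet, and parse overrides (the flag-set name and override values below are illustrative):

package options

import (
	"fmt"

	"github.com/spf13/pflag"
)

func exampleParseFlags() error {
	opts := NewPersistentVolumeControllerOptions()
	fs := pflag.NewFlagSet("controller-manager", pflag.ContinueOnError)
	opts.AddFlags(fs)
	// Override two of the defaults as if they were passed on the command line.
	if err := fs.Parse([]string{
		"--pvclaimbinder-sync-period=30s",
		"--pv-recycler-maximum-retry=5",
	}); err != nil {
		return err
	}
	fmt.Println(opts.PVClaimBinderSyncPeriod)                                // 30s
	fmt.Println(opts.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry) // 5
	return nil
}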

View file

@ -0,0 +1,430 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
)
var class1Parameters = map[string]string{
"param1": "value1",
}
var class2Parameters = map[string]string{
"param2": "value2",
}
var storageClasses = []*storage.StorageClass{
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "gold",
},
Provisioner: mockPluginName,
Parameters: class1Parameters,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "silver",
},
Provisioner: mockPluginName,
Parameters: class2Parameters,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "external",
},
Provisioner: "vendor.com/my-volume",
Parameters: class1Parameters,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "unknown-internal",
},
Provisioner: "kubernetes.io/unknown",
Parameters: class1Parameters,
},
}
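
For reference, a claim selects one of the classes above through the beta storage class annotation; the sketch below is illustrative only (the helper name and claim fields are invented, the annotation key comes from the storageutil import already used in this file):

package persistentvolume

import (
	"k8s.io/kubernetes/pkg/api/v1"
	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
)

func newGoldClaimSketch() *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claim-gold",
			Namespace: "default",
			Annotations: map[string]string{
				// Picks the "gold" StorageClass defined in storageClasses above.
				storageutil.StorageClassAnnotation: "gold",
			},
		},
	}
}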
// call to storageClass 1, returning an error
var provision1Error = provisionCall{
ret: errors.New("Mock provisioner error"),
expectedParameters: class1Parameters,
}
// call to storageClass 1, returning a valid PV
var provision1Success = provisionCall{
ret: nil,
expectedParameters: class1Parameters,
}
// call to storageClass 2, returning a valid PV
var provision2Success = provisionCall{
ret: nil,
expectedParameters: class2Parameters,
}
var provisionAlphaSuccess = provisionCall{
ret: nil,
}
// Test single call to syncVolume, expecting provisioning to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestProvisionSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume (with a default class)
"11-1 - successful provision with storage class 1",
novolumes,
newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, storageutil.StorageClassAnnotation),
newClaimArray("claim11-1", "uid11-1", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-1", "uid11-1", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Normal ProvisioningSucceeded"}, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision failure - plugin not found
"11-2 - plugin not found",
novolumes,
novolumes,
newClaimArray("claim11-2", "uid11-2", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-2", "uid11-2", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
[]string{"Warning ProvisioningFailed"}, noerrors,
testSyncClaim,
},
{
// Provision failure - newProvisioner returns error
"11-3 - newProvisioner failure",
novolumes,
novolumes,
newClaimArray("claim11-3", "uid11-3", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-3", "uid11-3", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// Provision failure - Provision returns error
"11-4 - provision failure",
novolumes,
novolumes,
newClaimArray("claim11-4", "uid11-4", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-4", "uid11-4", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithProvisionCalls([]provisionCall{provision1Error}, testSyncClaim),
},
{
// No provisioning if there is a matching volume available
"11-6 - provisioning when there is a volume available",
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
newClaimArray("claim11-6", "uid11-6", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, storageutil.StorageClassAnnotation, annBoundByController, annBindCompleted),
noevents, noerrors,
// No provisioning plugin configured - makes the test fail when
// the controller erroneously tries to provision something
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision success? - claim is bound before provisioner creates
// a volume.
"11-7 - claim is bound before provisioning",
novolumes,
newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, storageutil.StorageClassAnnotation),
newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
// The claim would be bound in next syncClaim
newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Create a volume before provisionClaimOperation starts.
// This simulates a parallel controller provisioning the volume.
reactor.lock.Lock()
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, storageutil.StorageClassAnnotation)
reactor.volumes[volume.Name] = volume
reactor.lock.Unlock()
}),
},
{
// Provision success - cannot save provisioned PV once,
// second retry succeeds
"11-8 - cannot save provisioned volume",
novolumes,
newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, storageutil.StorageClassAnnotation),
newClaimArray("claim11-8", "uid11-8", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-8", "uid11-8", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Normal ProvisioningSucceeded"},
[]reactorError{
// Inject error to the first
// kubeclient.PersistentVolumes.Create() call. All other calls
// will succeed.
{"create", "persistentvolumes", errors.New("Mock creation error")},
},
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision success? - cannot save provisioned PV five times,
// volume is deleted and delete succeeds
"11-9 - cannot save provisioned volume, delete succeeds",
novolumes,
novolumes,
newClaimArray("claim11-9", "uid11-9", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-9", "uid11-9", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{nil}, // delete calls
[]provisionCall{provision1Success}, // provision calls
testSyncClaim,
),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - no plugin found
"11-10 - cannot save provisioned volume, no delete plugin found",
novolumes,
novolumes,
newClaimArray("claim11-10", "uid11-10", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-10", "uid11-10", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
// No deleteCalls are configured, which results in no deleter plugin being available for the volume
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - deleter returns error five times
"11-11 - cannot save provisioned volume, deleter fails",
novolumes,
novolumes,
newClaimArray("claim11-11", "uid11-11", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-11", "uid11-11", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
errors.New("Mock deletion error2"),
errors.New("Mock deletion error3"),
errors.New("Mock deletion error4"),
errors.New("Mock deletion error5"),
},
[]provisionCall{provision1Success}, // provision calls
testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete succeeds 2nd time
"11-12 - cannot save provisioned volume, delete succeeds 2nd time",
novolumes,
novolumes,
newClaimArray("claim11-12", "uid11-12", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim11-12", "uid11-12", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
nil,
},
[]provisionCall{provision1Success}, // provision calls
testSyncClaim,
),
},
{
// Provision a volume (with non-default class)
"11-13 - successful provision with storage class 2",
novolumes,
volumeWithClass("silver", newVolumeArray("pvc-uid11-13", "1Gi", "uid11-13", "claim11-13", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)),
claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", v1.ClaimPending)),
// Binding will be completed in the next syncClaim
claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", v1.ClaimPending, annStorageProvisioner)),
[]string{"Normal ProvisioningSucceeded"}, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision2Success}, testSyncClaim),
},
{
// Provision error - non existing class
"11-14 - fail due to non-existing class",
novolumes,
novolumes,
claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", v1.ClaimPending)),
claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", v1.ClaimPending)),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning with class=""
"11-15 - no provisioning with class=''",
novolumes,
novolumes,
claimWithClass("", newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending)),
claimWithClass("", newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending)),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning with class=nil
"11-16 - no provisioning with class=nil",
novolumes,
novolumes,
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending),
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning + normal event with external provisioner
"11-17 - external provisioner",
novolumes,
novolumes,
claimWithClass("external", newClaimArray("claim11-17", "uid11-17", "1Gi", "", v1.ClaimPending)),
claimWithAnnotation(annStorageProvisioner, "vendor.com/my-volume",
claimWithClass("external", newClaimArray("claim11-17", "uid11-17", "1Gi", "", v1.ClaimPending))),
[]string{"Normal ExternalProvisioning"},
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning + warning event with unknown internal provisioner
"11-18 - unknown internal provisioner",
novolumes,
novolumes,
claimWithClass("unknown-internal", newClaimArray("claim11-18", "uid11-18", "1Gi", "", v1.ClaimPending)),
claimWithClass("unknown-internal", newClaimArray("claim11-18", "uid11-18", "1Gi", "", v1.ClaimPending)),
[]string{"Warning ProvisioningFailed"},
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
}
runSyncTests(t, tests, storageClasses)
}
func TestAlphaProvisionSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume with alpha annotation
"14-1 - successful alpha provisioning",
novolumes,
newVolumeArray("pvc-uid14-1", "1Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim14-1", "uid14-1", "1Gi", "", v1.ClaimPending, storageutil.AlphaStorageClassAnnotation),
// Binding will be completed in the next syncClaim
newClaimArray("claim14-1", "uid14-1", "1Gi", "", v1.ClaimPending, storageutil.AlphaStorageClassAnnotation, annStorageProvisioner),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provisionAlphaSuccess}, testSyncClaim),
},
{
// Provision success - there is already a volume available, still
// we provision a new one when requested.
"14-2 - no alpha provisioning when there is a volume available",
newVolumeArray("volume14-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
[]*v1.PersistentVolume{
newVolume("volume14-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("pvc-uid14-2", "1Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
},
newClaimArray("claim14-2", "uid14-2", "1Gi", "", v1.ClaimPending, storageutil.AlphaStorageClassAnnotation),
// Binding will be completed in the next syncClaim
newClaimArray("claim14-2", "uid14-2", "1Gi", "", v1.ClaimPending, storageutil.AlphaStorageClassAnnotation, annStorageProvisioner),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provisionAlphaSuccess}, testSyncClaim),
},
}
runSyncTests(t, tests, []*storage.StorageClass{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume with binding
"12-1 - successful provision",
novolumes,
newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, storageutil.StorageClassAnnotation),
newClaimArray("claim12-1", "uid12-1", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", v1.ClaimBound, storageutil.StorageClassAnnotation, annBoundByController, annBindCompleted, annStorageProvisioner),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
}
runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name)
}
// When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) {
ctrl := newTestController(nil, nil, nil, nil, false)
retVal := ctrl.provisionClaim(nil)
if retVal != nil {
t.Errorf("Expected nil return but got %v", retVal)
}
}
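
Test cases 11-9 through 11-12 above pin down the controller's behaviour when the freshly provisioned PV cannot be stored: retry the save a bounded number of times, then try to delete the backing volume so it does not leak. A generic sketch of that shape follows; the real logic (provisionClaimOperation) is not shown in this diff, so the function and parameter names are assumptions:

package persistentvolume

import "fmt"

func saveProvisionedVolumeSketch(save func() error, deleteVolume func() error, maxRetries int) error {
	var saveErr error
	for i := 0; i < maxRetries; i++ {
		if saveErr = save(); saveErr == nil {
			return nil // PV stored; binding completes in a later syncClaim
		}
	}
	// Could not store the PV object: clean up the backing volume, also with
	// a bounded number of attempts (compare tests 11-10 and 11-11 above).
	for i := 0; i < maxRetries; i++ {
		if err := deleteVolume(); err == nil {
			return fmt.Errorf("failed to save provisioned volume: %v (backing volume cleaned up)", saveErr)
		}
	}
	return fmt.Errorf("failed to save provisioned volume: %v (cleanup also failed)", saveErr)
}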

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,285 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
)
// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate periodic sync of volumes/claims - it would be
// either very timing-sensitive or slow to wait for real periodic sync.
func TestControllerSync(t *testing.T) {
tests := []controllerTest{
// [Unit test set 5] - controller tests.
// We test the controller as if
// it was connected to real API server, i.e. we call add/update/delete
// Claim/Volume methods. Also, all changes to volumes and claims are
// sent to add/update/delete Claim/Volume as real controller would do.
{
// addClaim gets a new claim. Check it's bound to a volume.
"5-2 - complete bind",
newVolumeArray("volume5-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume5-2", "1Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
noclaims, /* added in testAddClaim5_2 */
newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
claim := newClaim("claim5-2", "uid5-2", "1Gi", "", v1.ClaimPending)
reactor.addClaimEvent(claim)
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released.
"5-3 - delete claim",
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", v1.ClaimBound, annBoundByController, annBindCompleted),
noclaims,
noevents, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.deleteClaimEvent(claim)
return nil
},
},
{
// deleteVolume with a bound volume. Check the claim is Lost.
"5-4 - delete volume",
newVolumeArray("volume5-4", "1Gi", "uid5-4", "claim5-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
novolumes,
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.volumes.store.List()[0]
volume := obj.(*v1.PersistentVolume)
reactor.deleteVolumeEvent(volume)
return nil
},
},
{
// addVolume with provisioned volume from Kubernetes 1.2. No "action"
// is expected - it should stay bound.
"5-5 - add bound volume from 1.2",
novolumes,
[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-5", "1Gi", "uid5-5", "claim5-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
newClaimArray("claim5-5", "uid5-5", "1Gi", "", v1.ClaimPending),
newClaimArray("claim5-5", "uid5-5", "1Gi", "volume5-5", v1.ClaimBound, annBindCompleted, annBoundByController),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
volume := newVolume("volume5-5", "1Gi", "uid5-5", "claim5-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
reactor.addVolumeEvent(volume)
return nil
},
},
{
// updateVolume with provisioned volume from Kubernetes 1.2. No
// "action" is expected - it should stay bound.
"5-6 - update bound volume from 1.2",
[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", v1.ClaimBound),
newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", v1.ClaimBound, annBindCompleted),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
volume := newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
reactor.modifyVolumeEvent(volume)
return nil
},
},
{
// addVolume with unprovisioned volume from Kubernetes 1.2. The
// volume should be deleted.
"5-7 - add unprovisioned volume from 1.2",
novolumes,
novolumes,
newClaimArray("claim5-7", "uid5-7", "1Gi", "", v1.ClaimPending),
newClaimArray("claim5-7", "uid5-7", "1Gi", "", v1.ClaimPending),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
volume := newVolume("volume5-7", "1Gi", "uid5-7", "claim5-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
reactor.addVolumeEvent(volume)
return nil
},
},
{
// updateVolume with unprovisioned volume from Kubernetes 1.2. The
// volume should be deleted.
"5-8 - update bound volume from 1.2",
novolumes,
novolumes,
newClaimArray("claim5-8", "uid5-8", "1Gi", "", v1.ClaimPending),
newClaimArray("claim5-8", "uid5-8", "1Gi", "", v1.ClaimPending),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
volume := newVolume("volume5-8", "1Gi", "uid5-8", "claim5-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
reactor.modifyVolumeEvent(volume)
return nil
},
},
}
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
volumeSource := fcache.NewFakePVControllerSource()
claimSource := fcache.NewFakePVCControllerSource()
ctrl := newTestController(client, volumeSource, claimSource, nil, true)
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
for _, claim := range test.initialClaims {
claimSource.Add(claim)
reactor.claims[claim.Name] = claim
}
for _, volume := range test.initialVolumes {
volumeSource.Add(volume)
reactor.volumes[volume.Name] = volume
}
// Start the controller
stopCh := make(chan struct{})
ctrl.Run(stopCh)
// Wait for the controller to pass initial sync and fill its caches.
for !ctrl.volumeController.HasSynced() ||
!ctrl.claimController.HasSynced() ||
len(ctrl.claims.ListKeys()) < len(test.initialClaims) ||
len(ctrl.volumes.store.ListKeys()) < len(test.initialVolumes) {
time.Sleep(10 * time.Millisecond)
}
glog.V(4).Infof("controller synced, starting test")
// Call the tested function
err := test.test(ctrl, reactor, test)
if err != nil {
t.Errorf("Test %q initial test call failed: %v", test.name, err)
}
// Simulate a periodic resync, just in case some events arrived in a
// wrong order.
ctrl.claims.Resync()
ctrl.volumes.store.Resync()
err = reactor.waitTest(test)
if err != nil {
t.Errorf("Failed to run test %s: %v", test.name, err)
}
close(stopCh)
evaluateTestResults(ctrl, reactor, test, t)
}
}
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete)
pv.ResourceVersion = version
ret, err := storeObjectUpdate(c, pv, "volume")
if err != nil {
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
}
if expectedReturn != ret {
t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
}
// find the stored version
pvObj, found, err := c.GetByKey("pvName")
if err != nil {
t.Errorf("expected volume 'pvName' in the cache, got error instead: %v", err)
}
if !found {
t.Errorf("expected volume 'pvName' in the cache but it was not found")
}
pv, ok := pvObj.(*v1.PersistentVolume)
if !ok {
t.Errorf("expected volume in the cache, got different object instead: %#v", pvObj)
}
if ret {
if pv.ResourceVersion != version {
t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
}
} else {
if pv.ResourceVersion == version {
t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
}
}
}
// TestControllerCache tests func storeObjectUpdate()
func TestControllerCache(t *testing.T) {
// Cache under test
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// Store new PV
storeVersion(t, "Step1", c, "1", true)
// Store the same PV
storeVersion(t, "Step2", c, "1", true)
// Store newer PV
storeVersion(t, "Step3", c, "2", true)
// Store older PV - simulating old "PV updated" event or periodic sync with
// old data
storeVersion(t, "Step4", c, "1", false)
// Store newer PV - test integer parsing ("2" > "10" as string,
// while 2 < 10 as integers)
storeVersion(t, "Step5", c, "10", true)
}
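// The steps above assume that storeObjectUpdate compares ResourceVersions
// numerically rather than lexically; a minimal sketch of that comparison (an
// assumption about the implementation, not code from this file) would be:
//
//	oldVersion, _ := strconv.ParseInt(cached.ResourceVersion, 10, 64)
//	newVersion, _ := strconv.ParseInt(obj.ResourceVersion, 10, 64)
//	if newVersion < oldVersion {
//		return false, nil // ignore the outdated update
//	}
//
// Equal versions are accepted (Step2 above expects true), and a version that
// cannot be parsed as an integer is expected to produce an error, which the
// next test verifies.

// TestControllerCacheParsingError tests that a non-numeric ResourceVersion
// makes storeObjectUpdate fail instead of silently storing the object.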
func TestControllerCacheParsingError(t *testing.T) {
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// There must be something in the cache to compare with
storeVersion(t, "Step1", c, "1", true)
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete)
pv.ResourceVersion = "xxx"
_, err := storeObjectUpdate(c, pv, "volume")
if err == nil {
t.Errorf("Expected parsing error, got nil instead")
}
}
func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string) *v1.PersistentVolume {
if volume.Annotations == nil {
volume.Annotations = make(map[string]string)
}
volume.Annotations[annName] = annValue
return volume
}

View file

@ -0,0 +1,197 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)
// Test single call to syncVolume, expecting recycling to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) {
tests := []controllerTest{
{
// recycle volume bound by controller
"6-1 - successful recycle",
newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, annBoundByController),
newVolumeArray("volume6-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle volume bound by user
"6-2 - successful recycle with prebound volume",
newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-2", "1Gi", "", "claim6-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle failure - plugin not found
"6-3 - plugin not found",
newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
withMessage("No recycler plugin found for the volume!", newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume,
},
{
// recycle failure - newRecycler returns error
"6-4 - newRecycler returns error",
newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
withMessage("Failed to create recycler: Mock plugin error: no recycleCalls configured", newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume),
},
{
// recycle failure - recycle returns error
"6-5 - recycle returns error",
newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
withMessage("Recycler failed: Mock recycle error", newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
},
{
// recycle success(?) - volume is deleted before doRecycle() starts
"6-6 - volume is deleted before recycling",
newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before recycle operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume6-6")
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume is recycled by previous recycler just
// at the time new doRecycle() starts. This simulates "volume no
// longer needs recycling, skipping".
"6-7 - volume is deleted before recycling",
newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, annBoundByController),
newVolumeArray("volume6-7", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-7"]
volume.Spec.ClaimRef = nil
volume.Status.Phase = v1.VolumeAvailable
volume.Annotations = nil
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume bound by user is recycled by previous
// recycler just at the time new doRecycle() starts. This simulates
// "volume no longer needs recycling, skipping" with volume bound by
// user.
"6-8 - prebound volume is deleted before recycling",
newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-8", "1Gi", "", "claim6-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-8"]
volume.Spec.ClaimRef.UID = ""
volume.Status.Phase = v1.VolumeAvailable
reactor.lock.Unlock()
}),
},
{
// recycle success - volume bound by user is recycled, while a new
// claim is created with another UID.
"6-9 - prebound volume is recycled while the claim exists",
newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-9", "1Gi", "", "claim6-9", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", v1.ClaimPending),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", v1.ClaimPending),
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// volume has unknown reclaim policy - failure expected
"6-10 - unknown reclaim policy",
newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", v1.VolumeBound, "Unknown"),
withMessage("Volume has unrecognized PersistentVolumeReclaimPolicy", newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", v1.VolumeFailed, "Unknown")),
noclaims,
noclaims,
[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests, []*storage.StorageClass{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) {
tests := []controllerTest{
{
// recycle failure - recycle returns error. The controller should
// try again.
"7-1 - recycle returns error",
newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
newVolumeArray("volume7-1", "1Gi", "", "claim7-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
}

View file

@ -0,0 +1,82 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"net"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
// VolumeHost interface implementation for PersistentVolumeController.
var _ vol.VolumeHost = &PersistentVolumeController{}
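// The PV controller runs in the control plane and never mounts volumes on a
// node; it only needs enough of VolumeHost to hand to provisioner, deleter
// and recycler plugins, so the path-, mount- and host-related methods below
// are left as stubs.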
func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
return ctrl.kubeClient
}
func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *v1.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
return ctrl.cloud
}
func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
return nil
}
func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
return nil
}
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
}
func (ctrl *PersistentVolumeController) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}