Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

@@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["volume_manager.go"],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/kubelet/volumemanager/populator:go_default_library",
"//pkg/kubelet/volumemanager/reconciler:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["volume_manager_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/pod/testing:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
],
)

@@ -0,0 +1,2 @@
assignees:
- saad-ali

@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"actual_state_of_world.go",
"desired_state_of_world.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/types:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"actual_state_of_world_test.go",
"desired_state_of_world_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
],
)

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,356 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's desired state of the world cache.
// This cache contains volumes->pods, i.e. a set of all volumes that should be
// attached to this node and the pods that reference them and should mount the
// volume.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// attach/detach controller. They both keep track of different objects. This
// contains kubelet volume manager specific state.
type DesiredStateOfWorld interface {
// AddPodToVolume adds the given pod to the given volume in the cache
// indicating the specified pod should mount the specified volume.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If no volume plugin can support the given volumeSpec or more than one
// plugin can support it, an error is returned.
// If a volume with the name volumeName does not exist in the list of
// volumes that should be attached to this node, the volume is implicitly
// added.
// If a pod with the same unique name already exists under the specified
// volume, this is a no-op.
AddPodToVolume(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGidValue string) (v1.UniqueVolumeName, error)
// MarkVolumesReportedInUse sets the ReportedInUse value to true for the
// reportedVolumes. For volumes not in the reportedVolumes list, the
// ReportedInUse value is reset to false. The default ReportedInUse value
// for a newly created volume is false.
// When set to true this value indicates that the volume was successfully
// added to the VolumesInUse field in the node's status. The mount operation
// must check this value before issuing the operation.
// If a volume in the reportedVolumes list does not exist in the list of
// volumes that should be attached to this node, it is skipped without error.
MarkVolumesReportedInUse(reportedVolumes []v1.UniqueVolumeName)
// DeletePodFromVolume removes the given pod from the given volume in the
// cache indicating the specified pod no longer requires the specified
// volume.
// If a pod with the same unique name does not exist under the specified
// volume, this is a no-op.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePodFromVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName)
// VolumeExists returns true if the given volume exists in the list of
// volumes that should be attached to this node.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
VolumeExists(volumeName v1.UniqueVolumeName) bool
// PodExistsInVolume returns true if the given pod exists in the list of
// podsToMount for the given volume in the cache.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, false is returned.
PodExistsInVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool
// GetVolumesToMount generates and returns a list of volumes that should be
// attached to this node and the pods they should be mounted to based on the
// current desired state of the world.
GetVolumesToMount() []VolumeToMount
// GetPods generates and returns a map of pods, indexed by the pod's unique
// name. This map can be used to determine which pods are currently in the
// desired state of the world.
GetPods() map[types.UniquePodName]bool
}
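// Illustrative sketch (not part of the original change; the helper name is
// hypothetical): a caller might exercise this interface by adding a pod's
// volume, acknowledging the node status update, and reading back the
// resulting mount work. The pod is assumed to have at least one volume in
// its spec.
func exampleDesiredStateUsage(dsw DesiredStateOfWorld, pod *v1.Pod) error {
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    volumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    if err != nil {
        return fmt.Errorf("failed to add pod to volume: %v", err)
    }
    // Simulate the volume appearing in the node's Status.VolumesInUse.
    dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
    // Reconciler-style consumers iterate over the volumes to mount.
    for _, vtm := range dsw.GetVolumesToMount() {
        if vtm.VolumeName == volumeName && !vtm.ReportedInUse {
            return fmt.Errorf("volume %q not marked reported-in-use", volumeName)
        }
    }
    return nil
}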
// VolumeToMount represents a volume that is attached to this node and needs to
// be mounted to PodName.
type VolumeToMount struct {
operationexecutor.VolumeToMount
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
volumesToMount: make(map[v1.UniqueVolumeName]volumeToMount),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// volumesToMount is a map containing the set of volumes that should be
// attached to this node and mounted to the pods referencing it. The key in
// the map is the name of the volume and the value is a volume object
// containing more information about the volume.
volumesToMount map[v1.UniqueVolumeName]volumeToMount
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// The volume object represents a volume that should be attached to this node,
// and mounted to podsToMount.
type volumeToMount struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// podsToMount is a map containing the set of pods that reference this
// volume and should mount it once it is attached. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
podsToMount map[types.UniquePodName]podToMount
// pluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
pluginIsAttachable bool
// volumeGidValue contains the value of the GID annotation, if present.
volumeGidValue string
// reportedInUse indicates that the volume was successfully added to the
// VolumesInUse field in the node's status.
reportedInUse bool
}
// The pod object represents a pod that references the underlying volume and
// should mount it once it is attached.
type podToMount struct {
// podName contains the name of this pod.
podName types.UniquePodName
// Pod to mount the volume to. Used to create NewMounter.
pod *v1.Pod
// volume spec containing the specification for this volume. Used to
// generate the volume plugin object, and passed to plugin methods.
// For non-PVC volumes this is the same as defined in the pod object. For
// PVC volumes it is from the dereferenced PV object.
spec *volume.Spec
// outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
// directly in the pod. If the volume was referenced through a persistent
// volume claim, this contains the volume.Spec.Name() of the persistent
// volume claim
outerVolumeSpecName string
}
func (dsw *desiredStateOfWorld) AddPodToVolume(
podName types.UniquePodName,
pod *v1.Pod,
volumeSpec *volume.Spec,
outerVolumeSpecName string,
volumeGidValue string) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
volumePlugin, err := dsw.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
return "", fmt.Errorf(
"failed to get Plugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
var volumeName v1.UniqueVolumeName
// The unique volume name used depends on whether the volume is attachable
// or not.
attachable := dsw.isAttachableVolume(volumeSpec)
if attachable {
// For attachable volumes, use the unique volume name as reported by
// the plugin.
volumeName, err =
volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
volumeSpec.Name(),
volumePlugin.GetPluginName(),
err)
}
} else {
// For non-attachable volumes, generate a unique name based on the pod
// namespace and name and the name of the volume within the pod.
volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
}
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
volumeObj = volumeToMount{
volumeName: volumeName,
podsToMount: make(map[types.UniquePodName]podToMount),
pluginIsAttachable: attachable,
volumeGidValue: volumeGidValue,
reportedInUse: false,
}
dsw.volumesToMount[volumeName] = volumeObj
}
// Create new podToMount object. If it already exists, it is refreshed with
// updated values (this is required for volumes that require remounting on
// pod update, like Downward API volumes).
dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{
podName: podName,
pod: pod,
spec: volumeSpec,
outerVolumeSpecName: outerVolumeSpecName,
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) MarkVolumesReportedInUse(
reportedVolumes []v1.UniqueVolumeName) {
dsw.Lock()
defer dsw.Unlock()
reportedVolumesMap := make(
map[v1.UniqueVolumeName]bool, len(reportedVolumes) /* capacity */)
for _, reportedVolume := range reportedVolumes {
reportedVolumesMap[reportedVolume] = true
}
for volumeName, volumeObj := range dsw.volumesToMount {
_, volumeReported := reportedVolumesMap[volumeName]
volumeObj.reportedInUse = volumeReported
dsw.volumesToMount[volumeName] = volumeObj
}
}
func (dsw *desiredStateOfWorld) DeletePodFromVolume(
podName types.UniquePodName, volumeName v1.UniqueVolumeName) {
dsw.Lock()
defer dsw.Unlock()
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.podsToMount[podName]; !podExists {
return
}
// Delete pod if it exists
delete(dsw.volumesToMount[volumeName].podsToMount, podName)
if len(dsw.volumesToMount[volumeName].podsToMount) == 0 {
// Delete volume if no child pods left
delete(dsw.volumesToMount, volumeName)
}
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, volumeExists := dsw.volumesToMount[volumeName]
return volumeExists
}
func (dsw *desiredStateOfWorld) PodExistsInVolume(
podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool {
dsw.RLock()
defer dsw.RUnlock()
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return false
}
_, podExists := volumeObj.podsToMount[podName]
return podExists
}
func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool {
dsw.RLock()
defer dsw.RUnlock()
podList := make(map[types.UniquePodName]bool)
for _, volumeObj := range dsw.volumesToMount {
for podName := range volumeObj.podsToMount {
podList[podName] = true
}
}
return podList
}
func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
dsw.RLock()
defer dsw.RUnlock()
volumesToMount := make([]VolumeToMount, 0 /* len */, len(dsw.volumesToMount) /* cap */)
for volumeName, volumeObj := range dsw.volumesToMount {
for podName, podObj := range volumeObj.podsToMount {
volumesToMount = append(
volumesToMount,
VolumeToMount{
VolumeToMount: operationexecutor.VolumeToMount{
VolumeName: volumeName,
PodName: podName,
Pod: podObj.pod,
VolumeSpec: podObj.spec,
PluginIsAttachable: volumeObj.pluginIsAttachable,
OuterVolumeSpecName: podObj.outerVolumeSpecName,
VolumeGidValue: volumeObj.volumeGidValue,
ReportedInUse: volumeObj.reportedInUse}})
}
}
return volumesToMount
}
func (dsw *desiredStateOfWorld) isAttachableVolume(volumeSpec *volume.Spec) bool {
attachableVolumePlugin, _ :=
dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if attachableVolumePlugin != nil {
volumeAttacher, err := attachableVolumePlugin.NewAttacher()
if err == nil && volumeAttacher != nil {
return true
}
}
return false
}

@@ -0,0 +1,381 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// Calls AddPodToVolume() to add a new pod to a new volume
// Verifies the newly added pod/volume exists via
// PodExistsInVolume(), VolumeExists(), and GetVolumesToMount()
func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
// Act
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolumeName, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}
// Calls AddPodToVolume() twice to add the same pod to the same volume
// Verifies the pod/volume exists via PodExistsInVolume(), VolumeExists(),
// and GetVolumesToMount(), and that no errors occur.
func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
// Act
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolumeName, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}
// Populates the data struct with a new volume/pod
// Calls DeletePodFromVolume() to remove the pod
// Verifies the newly added pod/volume are deleted
func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolumeName, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
// Act
dsw.DeletePodFromVolume(podName, generatedVolumeName)
// Assert
verifyVolumeDoesntExist(t, generatedVolumeName, dsw)
verifyVolumeDoesntExistInVolumesToMount(t, generatedVolumeName, dsw)
verifyPodDoesntExistInVolumeDsw(t, podName, generatedVolumeName, dsw)
}
// Calls AddPodToVolume() to add three new volumes to the data struct
// Verifies the newly added pods/volumes exist via PodExistsInVolume(),
// VolumeExists(), and GetVolumesToMount()
// Marks only the second volume as reported in use.
// Verifies only that volume is marked reported in use
// Then marks only the third volume as reported in use.
// Verifies only that volume is marked reported in use
func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod1 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume1-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]}
pod1Name := volumehelper.GetUniquePodName(pod1)
pod2 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod2",
UID: "pod2uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume2-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device2",
},
},
},
},
},
}
volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]}
pod2Name := volumehelper.GetUniquePodName(pod2)
pod3 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume3-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device3",
},
},
},
},
},
}
volume3Spec := &volume.Spec{Volume: &pod3.Spec.Volumes[0]}
pod3Name := volumehelper.GetUniquePodName(pod3)
generatedVolume1Name, err := dsw.AddPodToVolume(
pod1Name, pod1, volume1Spec, volume1Spec.Name(), "" /* volumeGidValue */)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
generatedVolume2Name, err := dsw.AddPodToVolume(
pod2Name, pod2, volume2Spec, volume2Spec.Name(), "" /* volumeGidValue */)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
generatedVolume3Name, err := dsw.AddPodToVolume(
pod3Name, pod3, volume3Spec, volume3Spec.Name(), "" /* volumeGidValue */)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
volumesReportedInUse := []v1.UniqueVolumeName{generatedVolume2Name}
dsw.MarkVolumesReportedInUse(volumesReportedInUse)
// Assert
verifyVolumeExistsDsw(t, generatedVolume1Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume1Name, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, dsw)
verifyVolumeExistsDsw(t, generatedVolume2Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume2Name, true /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, dsw)
verifyVolumeExistsDsw(t, generatedVolume3Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume3Name, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, dsw)
// Act
volumesReportedInUse = []v1.UniqueVolumeName{generatedVolume3Name}
dsw.MarkVolumesReportedInUse(volumesReportedInUse)
// Assert
verifyVolumeExistsDsw(t, generatedVolume1Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume1Name, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, dsw)
verifyVolumeExistsDsw(t, generatedVolume2Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume2Name, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, dsw)
verifyVolumeExistsDsw(t, generatedVolume3Name, dsw)
verifyVolumeExistsInVolumesToMount(
t, generatedVolume3Name, true /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, dsw)
}
func verifyVolumeExistsDsw(
t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
volumeExists := dsw.VolumeExists(expectedVolumeName)
if !volumeExists {
t.Fatalf(
"VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
expectedVolumeName,
volumeExists)
}
}
func verifyVolumeDoesntExist(
t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
volumeExists := dsw.VolumeExists(expectedVolumeName)
if volumeExists {
t.Fatalf(
"VolumeExists(%q) returned incorrect value. Expected: <false> Actual: <%v>",
expectedVolumeName,
volumeExists)
}
}
func verifyVolumeExistsInVolumesToMount(
t *testing.T,
expectedVolumeName v1.UniqueVolumeName,
expectReportedInUse bool,
dsw DesiredStateOfWorld) {
volumesToMount := dsw.GetVolumesToMount()
for _, volume := range volumesToMount {
if volume.VolumeName == expectedVolumeName {
if volume.ReportedInUse != expectReportedInUse {
t.Fatalf(
"Found volume %v in the list of VolumesToMount, but ReportedInUse incorrect. Expected: <%v> Actual: <%v>",
expectedVolumeName,
expectReportedInUse,
volume.ReportedInUse)
}
return
}
}
t.Fatalf(
"Could not find volume %v in the list of desired state of world volumes to mount %+v",
expectedVolumeName,
volumesToMount)
}
func verifyVolumeDoesntExistInVolumesToMount(
t *testing.T, volumeToCheck v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
volumesToMount := dsw.GetVolumesToMount()
for _, volume := range volumesToMount {
if volume.VolumeName == volumeToCheck {
t.Fatalf(
"Found volume %v in the list of desired state of world volumes to mount. Expected it not to exist.",
volumeToCheck)
}
}
}
func verifyPodExistsInVolumeDsw(
t *testing.T,
expectedPodName volumetypes.UniquePodName,
expectedVolumeName v1.UniqueVolumeName,
dsw DesiredStateOfWorld) {
if podExistsInVolume := dsw.PodExistsInVolume(
expectedPodName, expectedVolumeName); !podExistsInVolume {
t.Fatalf(
"DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
podExistsInVolume)
}
}
func verifyPodDoesntExistInVolumeDsw(
t *testing.T,
expectedPodName volumetypes.UniquePodName,
expectedVolumeName v1.UniqueVolumeName,
dsw DesiredStateOfWorld) {
if podExistsInVolume := dsw.PodExistsInVolume(
expectedPodName, expectedVolumeName); podExistsInVolume {
t.Fatalf(
"DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
podExistsInVolume)
}
}

@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["desired_state_of_world_populator.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)

@@ -0,0 +1,427 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package populator implements interfaces that monitor and keep the states of the
caches in sync with the "ground truth".
*/
package populator
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorldPopulator periodically loops through the list of active
// pods and ensures that each one exists in the desired state of the world cache
// if it has volumes. It also verifies that the pods in the desired state of the
// world cache still exist; if not, it removes them.
type DesiredStateOfWorldPopulator interface {
Run(stopCh <-chan struct{})
// ReprocessPod removes the specified pod from the list of processedPods
// (if it exists) forcing it to be reprocessed. This is required to enable
// remounting volumes on pod updates (volumes like Downward API volumes
// depend on this behavior to ensure volume content is updated).
ReprocessPod(podName volumetypes.UniquePodName)
}
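// Illustrative sketch (not part of the original change; the helper name is
// hypothetical): wiring up and running a populator. The durations mirror the
// kubelet volume manager defaults; the client, pod manager, cache, and
// runtime are assumed to be supplied by the caller.
func exampleRunPopulator(
    kubeClient clientset.Interface,
    podManager pod.Manager,
    desiredStateOfWorld cache.DesiredStateOfWorld,
    containerRuntime kubecontainer.Runtime,
    stopCh <-chan struct{}) {
    populator := NewDesiredStateOfWorldPopulator(
        kubeClient,
        100*time.Millisecond, /* loopSleepDuration */
        2*time.Second, /* getPodStatusRetryDuration */
        podManager,
        desiredStateOfWorld,
        containerRuntime)
    go populator.Run(stopCh)
    // A pod update that requires a remount (e.g. a Downward API volume
    // change) can force reprocessing on the next loop iteration:
    //   populator.ReprocessPod(podName)
}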
// NewDesiredStateOfWorldPopulator returns a new instance of
// DesiredStateOfWorldPopulator.
//
// kubeClient - used to fetch PV and PVC objects from the API server
// loopSleepDuration - the amount of time the populator loop sleeps between
// successive executions
// podManager - the kubelet podManager that is the source of truth for the pods
// that exist on this host
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
kubeClient clientset.Interface,
loopSleepDuration time.Duration,
getPodStatusRetryDuration time.Duration,
podManager pod.Manager,
desiredStateOfWorld cache.DesiredStateOfWorld,
kubeContainerRuntime kubecontainer.Runtime) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
kubeClient: kubeClient,
loopSleepDuration: loopSleepDuration,
getPodStatusRetryDuration: getPodStatusRetryDuration,
podManager: podManager,
desiredStateOfWorld: desiredStateOfWorld,
pods: processedPods{
processedPods: make(map[volumetypes.UniquePodName]bool)},
kubeContainerRuntime: kubeContainerRuntime,
}
}
type desiredStateOfWorldPopulator struct {
kubeClient clientset.Interface
loopSleepDuration time.Duration
getPodStatusRetryDuration time.Duration
podManager pod.Manager
desiredStateOfWorld cache.DesiredStateOfWorld
pods processedPods
kubeContainerRuntime kubecontainer.Runtime
timeOfLastGetPodStatus time.Time
}
type processedPods struct {
processedPods map[volumetypes.UniquePodName]bool
sync.RWMutex
}
func (dswp *desiredStateOfWorldPopulator) Run(stopCh <-chan struct{}) {
wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
}
func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
podName volumetypes.UniquePodName) {
dswp.deleteProcessedPod(podName)
}
func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
return func() {
dswp.findAndAddNewPods()
// findAndRemoveDeletedPods() calls out to the container runtime to
// determine if the containers for a given pod are terminated. This is
// an expensive operation, therefore we limit the rate that
// findAndRemoveDeletedPods() is called independently of the main
// populator loop.
if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration {
glog.V(5).Infof(
"Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).",
dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration),
dswp.getPodStatusRetryDuration)
return
}
dswp.findAndRemoveDeletedPods()
}
}
func isPodTerminated(pod *v1.Pod) bool {
return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}
// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
for _, pod := range dswp.podManager.GetPods() {
if isPodTerminated(pod) {
// Do not (re)add volumes for terminated pods
continue
}
dswp.processPodVolumes(pod)
}
}
// Iterate through all pods in desired state of world, and remove if they no
// longer exist
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
var runningPods []*kubecontainer.Pod
runningPodsFetched := false
for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID)
if podExists {
// Skip running pods
if !isPodTerminated(pod) {
continue
}
// Skip non-memory backed volumes belonging to terminated pods
volume := volumeToMount.VolumeSpec.Volume
if volume == nil || ((volume.EmptyDir == nil || volume.EmptyDir.Medium != v1.StorageMediumMemory) &&
volume.ConfigMap == nil && volume.Secret == nil) {
continue
}
}
// Once a pod has been deleted from kubelet pod manager, do not delete
// it immediately from volume manager. Instead, check the kubelet
// containerRuntime to verify that all containers in the pod have been
// terminated.
if !runningPodsFetched {
var getPodsErr error
runningPods, getPodsErr = dswp.kubeContainerRuntime.GetPods(false)
if getPodsErr != nil {
glog.Errorf(
"kubeContainerRuntime.findAndRemoveDeletedPods returned error %v.",
getPodsErr)
continue
}
runningPodsFetched = true
dswp.timeOfLastGetPodStatus = time.Now()
}
runningContainers := false
for _, runningPod := range runningPods {
if runningPod.ID == volumeToMount.Pod.UID {
if len(runningPod.Containers) > 0 {
runningContainers = true
}
break
}
}
if runningContainers {
glog.V(5).Infof(
"Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
format.Pod(volumeToMount.Pod))
continue
}
glog.V(5).Infof(
"Removing volume %q (volSpec=%q) for pod %q from desired state.",
volumeToMount.VolumeName,
volumeToMount.VolumeSpec.Name(),
format.Pod(volumeToMount.Pod))
dswp.desiredStateOfWorld.DeletePodFromVolume(
volumeToMount.PodName, volumeToMount.VolumeName)
dswp.deleteProcessedPod(volumeToMount.PodName)
}
}
// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
if pod == nil {
return
}
uniquePodName := volumehelper.GetUniquePodName(pod)
if dswp.podPreviouslyProcessed(uniquePodName) {
return
}
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, volumeGidValue, err :=
dswp.createVolumeSpec(podVolume, pod.Namespace)
if err != nil {
glog.Errorf(
"Error processing volume %q for pod %q: %v",
podVolume.Name,
format.Pod(pod),
err)
continue
}
// Add volume to desired state of world
_, err = dswp.desiredStateOfWorld.AddPodToVolume(
uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue)
if err != nil {
glog.Errorf(
"Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v",
podVolume.Name,
volumeSpec.Name(),
uniquePodName,
err)
continue
}
glog.V(10).Infof(
"Added volume %q (volSpec=%q) for pod %q to desired state.",
podVolume.Name,
volumeSpec.Name(),
uniquePodName)
}
dswp.markPodProcessed(uniquePodName)
}
// podPreviouslyProcessed returns true if the volumes for this pod have already
// been processed by the populator
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
podName volumetypes.UniquePodName) bool {
dswp.pods.RLock()
defer dswp.pods.RUnlock()
_, exists := dswp.pods.processedPods[podName]
return exists
}
// markPodProcessed records that the volumes for the specified pod have been
// processed by the populator
func (dswp *desiredStateOfWorldPopulator) markPodProcessed(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
dswp.pods.processedPods[podName] = true
}
// deleteProcessedPod removes the specified pod from processedPods
func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
delete(dswp.pods.processedPods, podName)
}
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podVolume v1.Volume, podNamespace string) (*volume.Spec, string, error) {
if pvcSource :=
podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
glog.V(10).Infof(
"Found PVC, ClaimName: %q/%q",
podNamespace,
pvcSource.ClaimName)
// If podVolume is a PVC, fetch the real PV behind the claim
pvName, pvcUID, err := dswp.getPVCExtractPV(
podNamespace, pvcSource.ClaimName)
if err != nil {
return nil, "", fmt.Errorf(
"error processing PVC %q/%q: %v",
podNamespace,
pvcSource.ClaimName,
err)
}
glog.V(10).Infof(
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
podNamespace,
pvcSource.ClaimName,
pvcUID,
pvName)
// Fetch actual PV object
volumeSpec, volumeGidValue, err :=
dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
if err != nil {
return nil, "", fmt.Errorf(
"error processing PVC %q/%q: %v",
podNamespace,
pvcSource.ClaimName,
err)
}
glog.V(10).Infof(
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name,
pvName,
podNamespace,
pvcSource.ClaimName,
pvcUID)
return volumeSpec, volumeGidValue, nil
}
// Do not return the original volume object, since the source could mutate it
clonedPodVolumeObj, err := api.Scheme.DeepCopy(podVolume)
if err != nil || clonedPodVolumeObj == nil {
return nil, "", fmt.Errorf(
"failed to deep copy %q volume object. err=%v", podVolume.Name, err)
}
clonedPodVolume, ok := clonedPodVolumeObj.(v1.Volume)
if !ok {
return nil, "", fmt.Errorf(
"failed to cast clonedPodVolume %#v to v1.Volume",
clonedPodVolumeObj)
}
return volume.NewSpecFromVolume(&clonedPodVolume), "", nil
}
// getPVCExtractPV fetches the PVC object with the given namespace and name from
// the API server, extracts the name of the PV it is pointing to, and returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
namespace string, claimName string) (string, types.UID, error) {
pvc, err :=
dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
if err != nil || pvc == nil {
return "", "", fmt.Errorf(
"failed to fetch PVC %s/%s from API server. err=%v",
namespace,
claimName,
err)
}
if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
return "", "", fmt.Errorf(
"PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
namespace,
claimName,
pvc.Status.Phase,
pvc.Spec.VolumeName)
}
return pvc.Spec.VolumeName, pvc.UID, nil
}
// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
name string,
pvcReadOnly bool,
expectedClaimUID types.UID) (*volume.Spec, string, error) {
pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name)
if err != nil || pv == nil {
return nil, "", fmt.Errorf(
"failed to fetch PV %q from API server. err=%v", name, err)
}
if pv.Spec.ClaimRef == nil {
return nil, "", fmt.Errorf(
"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
name)
}
if pv.Spec.ClaimRef.UID != expectedClaimUID {
return nil, "", fmt.Errorf(
"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
name,
pv.Spec.ClaimRef.UID,
expectedClaimUID)
}
volumeGidValue := getPVVolumeGidAnnotationValue(pv)
return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}
func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok {
return volumeGid
}
return ""
}

@@ -0,0 +1,61 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["reconciler.go"],
tags = ["automanaged"],
deps = [
"//cmd/kubelet/app/options:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/nestedpendingoperations:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["reconciler_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

@@ -0,0 +1,476 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/runtime"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
// reconcilerLoopSleepDuration is the amount of time the reconciler loop
// waits between successive executions
reconcilerLoopSleepDuration time.Duration = 0 * time.Millisecond
reconcilerSyncStatesSleepPeriod time.Duration = 10 * time.Minute
// waitForAttachTimeout is the maximum amount of time an
// operationexecutor.Mount call will wait for a volume to be attached.
waitForAttachTimeout time.Duration = 1 * time.Second
nodeName k8stypes.NodeName = k8stypes.NodeName("mynodename")
kubeletPodsDir string = "fake-dir"
)
// Calls Run()
// Verifies there are no calls to attach, detach, mount, unmount, etc.
func Test_Run_Positive_DoNothing(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
fakeRecorder := &record.FakeRecorder{}
oex := operationexecutor.NewOperationExecutor(kubeClient, volumePluginMgr, fakeRecorder, false /* checkNodeCapabilitiesBeforeMount */)
reconciler := NewReconciler(
kubeClient,
false, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)
// Act
runReconciler(reconciler)
// Assert
assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroWaitForAttachCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroMountDeviceCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroSetUpCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}
// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there are attach/mount/etc. calls and no detach/unmount calls.
func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
fakeRecorder := &record.FakeRecorder{}
oex := operationexecutor.NewOperationExecutor(kubeClient, volumePluginMgr, fakeRecorder, false)
reconciler := NewReconciler(
kubeClient,
false, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
runReconciler(reconciler)
waitForMount(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyAttachCallCount(
1 /* expectedAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}
// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies there is one mount call and no unmount calls.
// Verifies there are no attach/detach calls.
func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
fakeRecorder := &record.FakeRecorder{}
oex := operationexecutor.NewOperationExecutor(kubeClient, volumePluginMgr, fakeRecorder, false)
reconciler := NewReconciler(
kubeClient,
true, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
runReconciler(reconciler)
waitForMount(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}
// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there is one attach/mount/etc call and no detach calls.
// Deletes volume/pod from desired state of world.
// Verifies detach/unmount calls are issued.
func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
fakeRecorder := &record.FakeRecorder{}
oex := operationexecutor.NewOperationExecutor(kubeClient, volumePluginMgr, fakeRecorder, false)
reconciler := NewReconciler(
kubeClient,
false, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
runReconciler(reconciler)
waitForMount(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyAttachCallCount(
1 /* expectedAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
// Act
dsw.DeletePodFromVolume(podName, generatedVolumeName)
waitForDetach(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyTearDownCallCount(
1 /* expectedTearDownCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyDetachCallCount(
1 /* expectedDetachCallCount */, fakePlugin))
}
// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies one mount call is made and no unmount calls.
// Deletes volume/pod from desired state of world.
// Verifies one unmount call is made.
// Verifies there are no attach/detach calls made.
func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
fakeRecorder := &record.FakeRecorder{}
oex := operationexecutor.NewOperationExecutor(kubeClient, volumePluginMgr, fakeRecorder, false)
reconciler := NewReconciler(
kubeClient,
true, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
runReconciler(reconciler)
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
waitForMount(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
// Act
dsw.DeletePodFromVolume(podName, generatedVolumeName)
waitForDetach(t, fakePlugin, generatedVolumeName, asw)
// Assert
assert.NoError(t, volumetesting.VerifyTearDownCallCount(
1 /* expectedTearDownCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}
func waitForMount(
t *testing.T,
fakePlugin *volumetesting.FakeVolumePlugin,
volumeName v1.UniqueVolumeName,
asw cache.ActualStateOfWorld) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
mountedVolumes := asw.GetMountedVolumes()
for _, mountedVolume := range mountedVolumes {
if mountedVolume.VolumeName == volumeName {
return true, nil
}
}
return false, nil
},
)
if err != nil {
t.Fatalf("Timed out waiting for volume %q to be attached.", volumeName)
}
}
func waitForDetach(
t *testing.T,
fakePlugin *volumetesting.FakeVolumePlugin,
volumeName v1.UniqueVolumeName,
asw cache.ActualStateOfWorld) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
func() (bool, error) {
if asw.VolumeExists(volumeName) {
return false, nil
}
return true, nil
},
)
if err != nil {
t.Fatalf("Timed out waiting for volume %q to be detached.", volumeName)
}
}
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 6,
}
return wait.ExponentialBackoff(backoff, fn)
}
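// As a rough worked example (assuming wait.ExponentialBackoff multiplies the
// delay by Factor after each of the Steps attempts): with the 5ms initial
// duration used above, the successive waits grow as roughly 5ms, 15ms, 45ms,
// 135ms, 405ms, and 1.215s, so a condition that never succeeds times out
// after about 1.8s in total.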
func createTestClient() *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: string(nodeName)},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake-plugin/volume-name",
DevicePath: "fake/path",
},
}},
Spec: v1.NodeSpec{ExternalID: string(nodeName)},
}, nil
})
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
return fakeClient
}
func runReconciler(reconciler Reconciler) {
sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return false })
go reconciler.Run(sourcesReady, wait.NeverStop)
}

@@ -0,0 +1,440 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumemanager
import (
"fmt"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
// reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
// between successive executions
reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond
// reconcilerSyncStatesSleepPeriod is the amount of time the reconciler reconstruct process
// waits between successive executions
reconcilerSyncStatesSleepPeriod time.Duration = 3 * time.Minute
// desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
// DesiredStateOfWorldPopulator loop waits between successive executions
desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond
// desiredStateOfWorldPopulatorGetPodStatusRetryDuration is the amount of
// time the DesiredStateOfWorldPopulator loop waits between successive pod
// cleanup calls (to prevent calling containerruntime.GetPodStatus too
// frequently).
desiredStateOfWorldPopulatorGetPodStatusRetryDuration time.Duration = 2 * time.Second
// podAttachAndMountTimeout is the maximum amount of time the
// WaitForAttachAndMount call will wait for all volumes in the specified pod
// to be attached and mounted. Even though cloud operations can take several
// minutes to complete, we set the timeout to 2 minutes because kubelet
// will retry in the next sync iteration. This frees the associated
// goroutine of the pod to process newer updates if needed (e.g., a delete
// request to the pod).
podAttachAndMountTimeout time.Duration = 2 * time.Minute
// podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
// call waits before retrying
podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond
// waitForAttachTimeout is the maximum amount of time an
// operationexecutor.Mount call will wait for a volume to be attached.
// Set to 10 minutes because we've seen attach operations take several
// minutes to complete for some volume plugins in some cases. While this
// operation is waiting, it only blocks other operations on the same device;
// other devices are not affected.
waitForAttachTimeout time.Duration = 10 * time.Minute
// reconcilerStartGracePeriod is the maximum amount of time volume manager
// can wait to start reconciler
reconcilerStartGracePeriod time.Duration = 60 * time.Second
)
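// With the values above, a WaitForAttachAndMount caller re-checks the mount
// state every 300ms for up to 2 minutes, i.e. on the order of 400 polls
// before the per-pod timeout fires.
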
// VolumeManager runs a set of asynchronous loops that figure out which volumes
// need to be attached/mounted/unmounted/detached based on the pods scheduled on
// this node, and make it so.
type VolumeManager interface {
// Starts the volume manager and all the asynchronous loops that it controls
Run(sourcesReady config.SourcesReady, stopCh <-chan struct{})
// WaitForAttachAndMount processes the volumes referenced in the specified
// pod and blocks until they are all attached and mounted (reflected in
// actual state of the world).
// An error is returned if any volume is not attached and mounted within
// the duration defined by podAttachAndMountTimeout.
WaitForAttachAndMount(pod *v1.Pod) error
// GetMountedVolumesForPod returns a VolumeMap containing the volumes
// referenced by the specified pod that are successfully attached and
// mounted. The key in the map is the OuterVolumeSpecName (i.e.
// pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if the pod has no
// volumes.
GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
// GetVolumesInUse returns a list of all volumes that implement the volume.Attacher
// interface and are currently in use according to the actual and desired
// state of the world caches. A volume is considered "in use" as soon as it
// is added to the desired state of the world, indicating it *should* be
// attached to this node, and remains "in use" until it is removed from both
// the desired state of the world and the actual state of the world, or it
// has been unmounted (as indicated in actual state of world).
// TODO(#27653): VolumesInUse should be handled gracefully on kubelet
// restarts.
GetVolumesInUse() []v1.UniqueVolumeName
// ReconcilerStatesHasBeenSynced returns true only after the reconciler's
// actual state has been synced at least once after kubelet starts, so that
// it is safe to update the mounted volume list retrieved from the actual state.
ReconcilerStatesHasBeenSynced() bool
// VolumeIsAttached returns true if the given volume is attached to this
// node.
VolumeIsAttached(volumeName v1.UniqueVolumeName) bool
// Marks the specified volumes as having successfully been reported as "in
// use" in the node's volume status.
MarkVolumesAsReportedInUse(volumesReportedAsInUse []v1.UniqueVolumeName)
}
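
// exampleWaitForPodVolumes is an illustrative sketch, not called anywhere in
// this package, of how a caller such as the kubelet pod sync loop is expected
// to drive a VolumeManager: start the asynchronous loops once, then block on
// WaitForAttachAndMount before starting a pod's containers.
func exampleWaitForPodVolumes(
	vm VolumeManager,
	sourcesReady config.SourcesReady,
	pod *v1.Pod) error {
	stopCh := make(chan struct{})
	defer close(stopCh)
	// Run starts the reconciler and desired-state populator loops and only
	// returns once stopCh is closed.
	go vm.Run(sourcesReady, stopCh)
	if err := vm.WaitForAttachAndMount(pod); err != nil {
		return fmt.Errorf("volumes for pod %q are not ready: %v", pod.Name, err)
	}
	return nil
}
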
// NewVolumeManager returns a new concrete instance implementing the
// VolumeManager interface.
//
// kubeClient - the kube API client used by the DesiredStateOfWorldPopulator
// to communicate with the API server to fetch PV and PVC objects
// volumePluginMgr - the volume plugin manager used to access volume plugins.
// Must be pre-initialized.
func NewVolumeManager(
controllerAttachDetachEnabled bool,
nodeName k8stypes.NodeName,
podManager pod.Manager,
kubeClient clientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
kubeContainerRuntime kubecontainer.Runtime,
mounter mount.Interface,
kubeletPodsDir string,
recorder record.EventRecorder,
checkNodeCapabilitiesBeforeMount bool) (VolumeManager, error) {
vm := &volumeManager{
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
actualStateOfWorld: cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
operationExecutor: operationexecutor.NewOperationExecutor(
kubeClient,
volumePluginMgr,
recorder,
checkNodeCapabilitiesBeforeMount),
}
vm.reconciler = reconciler.NewReconciler(
kubeClient,
controllerAttachDetachEnabled,
reconcilerLoopSleepPeriod,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
vm.desiredStateOfWorld,
vm.actualStateOfWorld,
vm.operationExecutor,
mounter,
volumePluginMgr,
kubeletPodsDir)
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
kubeClient,
desiredStateOfWorldPopulatorLoopSleepPeriod,
desiredStateOfWorldPopulatorGetPodStatusRetryDuration,
podManager,
vm.desiredStateOfWorld,
kubeContainerRuntime)
return vm, nil
}
// volumeManager implements the VolumeManager interface
type volumeManager struct {
// kubeClient is the kube API client used by DesiredStateOfWorldPopulator to
// communicate with the API server to fetch PV and PVC objects
kubeClient clientset.Interface
// volumePluginMgr is the volume plugin manager used to access volume
// plugins. It must be pre-initialized.
volumePluginMgr *volume.VolumePluginMgr
// desiredStateOfWorld is a data structure containing the desired state of
// the world according to the volume manager: i.e., what volumes should be
// attached and which pods reference them.
// The data structure is populated by the desired state of the world
// populator using the kubelet pod manager.
desiredStateOfWorld cache.DesiredStateOfWorld
// actualStateOfWorld is a data structure containing the actual state of
// the world according to the manager: i.e. which volumes are attached to
// this node and what pods the volumes are mounted to.
// The data structure is populated upon successful completion of attach,
// detach, mount, and unmount actions triggered by the reconciler.
actualStateOfWorld cache.ActualStateOfWorld
// operationExecutor is used to start asynchronous attach, detach, mount,
// and unmount operations.
operationExecutor operationexecutor.OperationExecutor
// reconciler runs an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering attach,
// detach, mount, and unmount operations using the operationExecutor.
reconciler reconciler.Reconciler
// desiredStateOfWorldPopulator runs an asynchronous periodic loop to
// populate the desiredStateOfWorld using the kubelet PodManager.
desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator
}
func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
defer runtime.HandleCrash()
go vm.desiredStateOfWorldPopulator.Run(stopCh)
glog.V(2).Infof("The desired_state_of_world populator starts")
glog.Infof("Starting Kubelet Volume Manager")
go vm.reconciler.Run(sourcesReady, stopCh)
<-stopCh
glog.Infof("Shutting down Kubelet Volume Manager")
}
func (vm *volumeManager) GetMountedVolumesForPod(
podName types.UniquePodName) container.VolumeMap {
podVolumes := make(container.VolumeMap)
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{Mounter: mountedVolume.Mounter}
}
return podVolumes
}
func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
podName := volumehelper.GetUniquePodName(pod)
supplementalGroups := sets.NewString()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
if mountedVolume.VolumeGidValue != "" {
supplementalGroups.Insert(mountedVolume.VolumeGidValue)
}
}
result := make([]int64, 0, supplementalGroups.Len())
for _, group := range supplementalGroups.List() {
iGroup, extra := getExtraSupplementalGid(group, pod)
if !extra {
continue
}
result = append(result, iGroup)
}
return result
}
func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
// Report volumes in desired state of world and actual state of world so
// that volumes are marked in use as soon as the decision is made that the
// volume *should* be attached to this node until it is safely unmounted.
desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes()
volumesToReportInUse :=
make(
[]v1.UniqueVolumeName,
0, /* len */
len(desiredVolumes)+len(mountedVolumes) /* cap */)
desiredVolumesMap :=
make(
map[v1.UniqueVolumeName]bool,
len(desiredVolumes)+len(mountedVolumes) /* cap */)
for _, volume := range desiredVolumes {
if volume.PluginIsAttachable {
desiredVolumesMap[volume.VolumeName] = true
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
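// Mounted volumes that are no longer in the desired state are still in
// use until they are unmounted, so append them as well, skipping any
// already reported from the desired state above.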
for _, volume := range mountedVolumes {
if volume.PluginIsAttachable {
if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
}
return volumesToReportInUse
}
func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool {
return vm.reconciler.StatesHasBeenSynced()
}
func (vm *volumeManager) VolumeIsAttached(
volumeName v1.UniqueVolumeName) bool {
return vm.actualStateOfWorld.VolumeExists(volumeName)
}
func (vm *volumeManager) MarkVolumesAsReportedInUse(
volumesReportedAsInUse []v1.UniqueVolumeName) {
vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse)
}
func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
expectedVolumes := getExpectedVolumes(pod)
if len(expectedVolumes) == 0 {
// No volumes to verify
return nil
}
glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
uniquePodName := volumehelper.GetUniquePodName(pod)
// Some volumes expect SetUp to be called over and over again to pick up
// updates; remount the plugins for which this is true. (Atomically updating
// volumes, like the Downward API, depend on this to refresh their contents.)
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)
err := wait.Poll(
podAttachAndMountRetryInterval,
podAttachAndMountTimeout,
vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))
if err != nil {
// Timeout expired
unmountedVolumes :=
vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
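// The final mounts may have completed between the last poll and the
// timeout; re-check before reporting a failure.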
if len(unmountedVolumes) == 0 {
return nil
}
return fmt.Errorf(
"timeout expired waiting for volumes to attach/mount for pod %q in namespace %q. List of unattached/unmounted volumes=%v",
pod.Name,
pod.Namespace,
unmountedVolumes)
}
glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
return nil
}
// verifyVolumesMountedFunc returns a method that returns true when all expected
// volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(
podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
return func() (done bool, err error) {
return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
}
}
// getUnmountedVolumes fetches the current list of mounted volumes from
// the actual state of the world, and uses it to process the list of
// expectedVolumes. It returns a list of unmounted volumes.
func (vm *volumeManager) getUnmountedVolumes(
podName types.UniquePodName, expectedVolumes []string) []string {
mountedVolumes := sets.NewString()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)
}
return filterUnmountedVolumes(mountedVolumes, expectedVolumes)
}
// filterUnmountedVolumes adds each element of expectedVolumes that is not in
// mountedVolumes to a list of unmountedVolumes and returns it.
func filterUnmountedVolumes(
mountedVolumes sets.String, expectedVolumes []string) []string {
unmountedVolumes := []string{}
for _, expectedVolume := range expectedVolumes {
if !mountedVolumes.Has(expectedVolume) {
unmountedVolumes = append(unmountedVolumes, expectedVolume)
}
}
return unmountedVolumes
}
// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
expectedVolumes := []string{}
if pod == nil {
return expectedVolumes
}
for _, podVolume := range pod.Spec.Volumes {
expectedVolumes = append(expectedVolumes, podVolume.Name)
}
return expectedVolumes
}
// getExtraSupplementalGid returns the value of an extra supplemental GID as
// defined by an annotation on a volume and a boolean indicating whether the
// volume defined a GID that the pod doesn't already request.
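//
// For example, for a pod whose SecurityContext already lists group 555:
//
//	getExtraSupplementalGid("777", pod) // (777, true)
//	getExtraSupplementalGid("555", pod) // (0, false): pod already requests it
//	getExtraSupplementalGid("a", pod)   // (0, false): not a parsable GID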
func getExtraSupplementalGid(volumeGidValue string, pod *v1.Pod) (int64, bool) {
if volumeGidValue == "" {
return 0, false
}
gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
if err != nil {
return 0, false
}
if pod.Spec.SecurityContext != nil {
for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
if gid == existingGid {
return 0, false
}
}
}
return gid, true
}

View file

@ -0,0 +1,294 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumemanager
import (
"os"
"reflect"
"strconv"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/config"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/pod"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/sets"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
testHostname = "test-hostname"
)
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
node, pod, pv, claim := createObjects()
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
if err != nil {
t.Fatalf("Failed to initialize volume manager: %v", err)
}
stopCh := runVolumeManager(manager)
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
stopCh,
manager)
err = manager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
expectedMounted := pod.Spec.Volumes[0].Name
actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
}
expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
actualInUse := manager.GetVolumesInUse()
if !reflect.DeepEqual(expectedInUse, actualInUse) {
t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
}
}
func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
node, pod, _, claim := createObjects()
existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]
cases := []struct {
gidAnnotation string
expected []int64
}{
{
gidAnnotation: "777",
expected: []int64{777},
},
{
gidAnnotation: strconv.FormatInt(existingGid, 10),
expected: []int64{},
},
{
gidAnnotation: "a",
expected: []int64{},
},
{
gidAnnotation: "",
expected: []int64{},
},
}
for _, tc := range cases {
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
ClaimRef: &v1.ObjectReference{
Name: claim.ObjectMeta.Name,
},
},
}
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
if err != nil {
t.Errorf("Failed to initialize volume manager: %v", err)
continue
}
stopCh := runVolumeManager(manager)
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
stopCh,
manager)
err = manager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
continue
}
actual := manager.GetExtraSupplementalGroupsForPod(pod)
if !reflect.DeepEqual(tc.expected, actual) {
t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
}
}
}
func newTestVolumeManager(
tmpDir string,
podManager pod.Manager,
kubeClient clientset.Interface) (VolumeManager, error) {
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
fakeRecorder := &record.FakeRecorder{}
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins([]volume.VolumePlugin{plug}, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil))
vm, err := NewVolumeManager(
true,
testHostname,
podManager,
kubeClient,
plugMgr,
&containertest.FakeRuntime{},
&mount.FakeMounter{},
"",
fakeRecorder,
false /* checkNodeCapabilitiesBeforeMount */)
return vm, err
}
// createObjects returns objects for making a fake clientset. The pv is
// already attached to the node and bound to the claim used by the pod.
func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
node := &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testHostname},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake/pvA",
DevicePath: "fake/path",
},
}},
Spec: v1.NodeSpec{ExternalID: testHostname},
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "abc",
Namespace: "nsA",
UID: "1234",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "claimA",
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SupplementalGroups: []int64{555},
},
},
}
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
return node, pod, pv, claim
}
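// simulateVolumeInUseUpdate periodically marks the given volume as reported
// in use, standing in for the kubelet's node status updater so that
// WaitForAttachAndMount can make progress in these tests.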
func simulateVolumeInUseUpdate(
volumeName v1.UniqueVolumeName,
stopCh <-chan struct{},
volumeManager VolumeManager) {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
volumeManager.MarkVolumesAsReportedInUse(
[]v1.UniqueVolumeName{volumeName})
case <-stopCh:
return
}
}
}
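// runVolumeManager starts the manager's asynchronous loops with all pod
// sources reported ready and returns the stop channel; closing it shuts the
// loops down.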
func runVolumeManager(manager VolumeManager) chan struct{} {
stopCh := make(chan struct{})
sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
go manager.Run(sourcesReady, stopCh)
return stopCh
}