
Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

60
vendor/k8s.io/kubernetes/test/e2e/common/BUILD generated vendored Normal file

@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"configmap.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
"downwardapi_volume.go",
"empty_dir.go",
"expansion.go",
"host_path.go",
"init_container.go",
"kubelet_etc_hosts.go",
"networking.go",
"pods.go",
"privileged.go",
"secrets.go",
"sysctl.go",
"util.go",
"volumes.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
"//vendor:golang.org/x/net/websocket",
],
)

440
vendor/k8s.io/kubernetes/test/e2e/common/configmap.go generated vendored Normal file

@@ -0,0 +1,440 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
It("should be consumable from pods in volume [Conformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
It("should be consumable from pods in volume with defaultMode set [Conformance]", func() {
defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root [Conformance]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
It("should be consumable from pods in volume with mappings [Conformance]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
It("should be consumable from pods in volume with mappings and Item mode set[Conformance]", func() {
mode := int32(0400)
doConfigMapE2EWithMappings(f, 0, 0, &mode)
})
It("should be consumable from pods in volume with mappings as non-root [Conformance]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup]", func() {
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
It("updates should be reflected in volume [Conformance]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
name := "configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "configmap-volume"
volumeMountPath := "/etc/configmap-volume"
containerName := "configmap-volume-test"
configMap := &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("should be consumable via environment variable [Conformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
It("should be consumable in multiple volumes in the same pod [Conformance]", func() {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
volumeName2 = "configmap-volume-2"
volumeMountPath2 = "/etc/configmap-volume-2"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
})
})
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if uid != 0 {
pod.Spec.SecurityContext.RunAsUser = &uid
}
if fsGroup != 0 {
pod.Spec.SecurityContext.FSGroup = &fsGroup
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
// Just check file mode if fsGroup is not set. If fsGroup is set, the
// final mode is adjusted and we are not testing that case.
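// As an illustration (not part of the vendored file), os.FileMode(0644)
// renders as "-rw-r--r--", so that is the mode string mounttest is expected
// to print in the default case.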
output := []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
}
if fsGroup == 0 {
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
output = append(output, "mode of file \"/etc/configmap-volume/data-1\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
var (
name = "configmap-test-volume-map-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: []v1.KeyToPath{
{
Key: "data-2",
Path: "path/to/data-2",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if uid != 0 {
pod.Spec.SecurityContext.RunAsUser = &uid
}
if fsGroup != 0 {
pod.Spec.SecurityContext.FSGroup = &fsGroup
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
} else {
mode := int32(0644)
itemMode = &mode
}
// Just check file mode if fsGroup is not set. If fsGroup is set, the
// final mode is adjusted and we are not testing that case.
output := []string{
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
}
if fsGroup == 0 {
modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
output = append(output, "mode of file \"/etc/configmap-volume/path/to/data-2\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}

405
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go generated vendored Normal file

@@ -0,0 +1,405 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
probTestContainerName = "test-webserver"
probTestInitialDelaySeconds = 15
defaultObservationTimeout = time.Minute * 2
)
var _ = framework.KubeDescribe("Probing container", func() {
f := framework.NewDefaultFramework("container-probe")
var podClient *framework.PodClient
probe := webserverProbeBuilder{}
BeforeEach(func() {
podClient = f.PodClient()
})
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
f.WaitForPodReady(p.Name)
p, err := podClient.Get(p.Name)
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready")
// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
readyTime, err := getTransitionTimeForReadyCondition(p)
framework.ExpectNoError(err)
startedTime, err := getContainerStartedTime(p, probTestContainerName)
framework.ExpectNoError(err)
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
initialDelay := probTestInitialDelaySeconds * time.Second
if readyTime.Sub(startedTime) < initialDelay {
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
Consistently(func() (bool, error) {
p, err := podClient.Get(p.Name)
if err != nil {
return false, err
}
return v1.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name)
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).NotTo(BeTrue(), "pod should not be ready")
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
// Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 5, time.Minute*5)
})
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(80),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
// TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 10"},
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 1,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
})
func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
for _, status := range p.Status.ContainerStatuses {
if status.Name != containerName {
continue
}
if status.State.Running == nil {
return time.Time{}, fmt.Errorf("Container is not running")
}
return status.State.Running.StartedAt.Time, nil
}
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
}
func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
for _, cond := range p.Status.Conditions {
if cond.Type == v1.PodReady {
return cond.LastTransitionTime.Time, nil
}
}
return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
}
func getRestartCount(p *v1.Pod) int {
count := 0
for _, containerStatus := range p.Status.ContainerStatuses {
count += int(containerStatus.RestartCount)
}
return count
}
func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: probTestContainerName,
Image: "gcr.io/google_containers/test-webserver:e2e",
LivenessProbe: livenessProbe,
ReadinessProbe: readinessProbe,
},
},
},
}
return pod
}
type webserverProbeBuilder struct {
failing bool
initialDelay bool
}
func (b webserverProbeBuilder) withFailing() webserverProbeBuilder {
b.failing = true
return b
}
func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
b.initialDelay = true
return b
}
func (b webserverProbeBuilder) build() *v1.Probe {
probe := &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/",
},
},
}
if b.initialDelay {
probe.InitialDelaySeconds = probTestInitialDelaySeconds
}
if b.failing {
probe.HTTPGet.Port = intstr.FromInt(81)
}
return probe
}
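// Illustrative usage (not part of the vendored file): the builder uses value
// receivers, so each with* call modifies and returns a copy:
//
// probe := webserverProbeBuilder{}.withInitialDelay().withFailing().build()
//
// The result is an HTTP GET probe on "/" against port 81 (which fails, since
// the test webserver listens on 80) with InitialDelaySeconds set to
// probTestInitialDelaySeconds.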
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty())
containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
// Wait until the pod is not pending. (We check for a state other than
// 'Pending' rather than waiting for 'Running', since when failures occur
// the pod goes to 'Terminated', which can cause indefinite blocking.)
framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired.
deadline := time.Now().Add(timeout)
lastRestartCount := initialRestartCount
observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start))
if restartCount < lastRestartCount {
framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
ns, pod.Name, lastRestartCount, restartCount)
}
}
observedRestarts = restartCount - initialRestartCount
if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
// Stop if we have observed more than expectNumRestarts restarts.
break
}
lastRestartCount = restartCount
}
// If we expected 0 restarts, fail if observed any restart.
// If we expected n restarts (n > 0), fail if we observed < n restarts.
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
int(observedRestarts) < expectNumRestarts) {
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
ns, pod.Name, expectNumRestarts, observedRestarts)
}
}

87
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go generated vendored Normal file

@@ -0,0 +1,87 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
It("should use the image defaults if command and args are blank [Conformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
"[/ep default arguments]",
})
})
It("should be able to override the image's default arguments (docker cmd) [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override arguments", pod, 0, []string{
"[/ep override arguments]",
})
})
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
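// Illustratively: the eptest image's defaults produce "[/ep default arguments]"
// (entrypoint plus docker cmd), while overriding Command below yields "[/ep-2]"
// with the image's default arguments dropped.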
It("should be able to override the image's default commmand (docker entrypoint) [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
f.TestContainerOutput("override command", pod, 0, []string{
"[/ep-2]",
})
})
It("should be able to override the image's default command and arguments [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override all", pod, 0, []string{
"[/ep-2 override arguments]",
})
})
})
const testContainerName = "test-container"
// Return a prototypical entrypoint test pod
func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID())
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: testContainerName,
Image: "gcr.io/google_containers/eptest:0.1",
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
}

211
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go generated vendored Normal file

@@ -0,0 +1,211 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
It("should provide pod name and namespace as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
}
expectations := []string{
fmt.Sprintf("POD_NAME=%v", podName),
fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide pod IP as an env var [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
}
expectations := []string{
"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
{
Name: "CPU_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.cpu",
},
},
},
{
Name: "MEMORY_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.memory",
},
},
},
}
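// With the default divisor of "1", resourceFieldRef exposes CPU in whole
// cores rounded up and memory in bytes, so the pod built by testDownwardAPI
// below (requests 250m/32Mi, limits 1250m/64Mi) yields 1, 33554432, 2, and
// 67108864 in the expectations that follow.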
expectations := []string{
"CPU_LIMIT=2",
"MEMORY_LIMIT=67108864",
"CPU_REQUEST=1",
"MEMORY_REQUEST=33554432",
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide default limits.cpu/memory from node allocatable [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
}
expectations := []string{
"CPU_LIMIT=[1-9]",
"MEMORY_LIMIT=[1-9]",
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
})
})
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}

401
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go generated vendored Normal file

@@ -0,0 +1,401 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Downward API volume", func() {
// How long to wait for a pod's logs to be displayed
const podLogTimeout = 2 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should provide podname only [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should set DefaultMode on files [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should set mode on item file [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should update labels on modification [Conformance]", func() {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
})
It("should update annotations on modification [Conformance]", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
})
It("should provide container's cpu limit [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
It("should provide container's memory limit [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
It("should provide container's cpu request [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
It("should provide container's memory request [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
It("should provide node allocatable (cpu) as default cpu limit if the limit is not set [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
It("should provide node allocatable (memory) as default memory limit if the limit is not set [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
}
return pod
}
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
return pod
}
func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod
}
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod
}
func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
}
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
}
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
return pod
}
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "podinfo",
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
})
}
if len(annotations) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
})
}
}
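// The downward API writes labels and annotations one per line in the form
// key="value", which is why the update tests above poll the pod logs for
// substrings like "key3=\"value3\"\n".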
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771

352
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go generated vendored Normal file

@@ -0,0 +1,352 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
testImageRootUid = "gcr.io/google_containers/mounttest:0.7"
testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3"
)
var _ = framework.KubeDescribe("EmptyDir volumes", func() {
f := framework.NewDefaultFramework("emptydir")
Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() {
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("new files should be created with FSGroup ownership when container is non-root", func() {
doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
})
It("volume on tmpfs should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
})
It("volume on tmpfs should have the correct mode [Conformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0644,tmpfs) [Conformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0666,tmpfs) [Conformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0777,tmpfs) [Conformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0644,tmpfs) [Conformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0666,tmpfs) [Conformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0777,tmpfs) [Conformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode [Conformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0644,default) [Conformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0666,default) [Conformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0777,default) [Conformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0644,default) [Conformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0666,default) [Conformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0777,default) [Conformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
})
})
const (
containerName = "test-container"
volumeName = "test-volume"
)
func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0660=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
fmt.Sprintf("--file_owner=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
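// With fsGroup set, the kubelet chowns the emptyDir to GID 123 and applies
// the setgid bit, so the file created with mode 0660 inherits that group,
// matching the "owner GID" expectation below.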
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw----",
"content of file \"/test-volume/test-file\": mount-tester new file",
"owner GID of \"/test-volume/test-file\": 123",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
fsGroup := int64(1001)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0666=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0777=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {
if medium == v1.StorageMediumMemory {
return "tmpfs"
}
return "node default medium"
}
func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
podName := "pod-" + string(uuid.NewUUID())
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: image,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: source,
},
},
},
},
}
}

132
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go generated vendored Normal file

@@ -0,0 +1,132 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// These tests exercise the Kubernetes expansion syntax $(VAR).
// For more information, see: docs/design/expansion.md
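// For example, with FOO=foo-value and BAR=bar-value, the env var value
// "$(FOO);;$(BAR)" expands to "foo-value;;bar-value". Expansion is performed
// by Kubernetes before the container starts, which is why the shell-level
// "TEST_VAR=wrong" assignments in the tests below cannot override it.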
var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "FOO",
Value: "foo-value",
},
{
Name: "BAR",
Value: "bar-value",
},
{
Name: "FOOBAR",
Value: "$(FOO);;$(BAR)",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
})
})
It("should allow substituting values in a container's command [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's command", pod, 0, []string{
"test-value",
})
})
It("should allow substituting values in a container's args [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's args", pod, 0, []string{
"test-value",
})
})
})

171
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go generated vendored Normal file

@@ -0,0 +1,171 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// TODO: Consolidate this code with the code for emptyDir.
// This will require some smart refactoring.
var _ = framework.KubeDescribe("HostPath", func() {
f := framework.NewDefaultFramework("hostpath")
BeforeEach(func() {
//cleanup before running the test.
_ = os.Remove("/tmp/test-file")
})
It("should give a volume the correct mode [Conformance]", func() {
volumePath := "/test-volume"
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
f.TestContainerOutput("hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
// This test requires mounting a folder into a container with write privileges.
It("should support r/w", func() {
volumePath := "/test-volume"
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_mode=%v", filePath),
}
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePath),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
//Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod.
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
It("should support subPath", func() {
volumePath := "/test-volume"
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
// Write the file in the subPath from container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
fmt.Sprintf("--file_mode=%v", filePathInWriter),
}
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
})
//These constants are borrowed from the other test.
//const volumeName = "test-volume"
const containerName1 = "test-container-1"
const containerName2 = "test-container-2"
func mount(source *v1.HostPathVolumeSource) []v1.Volume {
return []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: source,
},
},
}
}
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
podName := "pod-host-path-test"
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName1,
Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
{
Name: containerName2,
Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: mount(source),
},
}
}
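// A minimal sketch of the consolidation suggested by the TODO above: making the
// volume source a lambda leaves only the VolumeSource constructor test-specific.
// Hypothetical helper, not part of the vendored file:
func testPodWithVol(path string, source func() v1.VolumeSource) *v1.Pod {
	// Build the usual two-container pod, then swap in the caller's volume source.
	pod := testPodWithHostVol(path, &v1.HostPathVolumeSource{})
	pod.Spec.Volumes[0].VolumeSource = source()
	return pod
}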

408
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go generated vendored Normal file
View file

@ -0,0 +1,408 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strconv"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("InitContainer", func() {
f := framework.NewDefaultFramework("init-container")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
},
}
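// In this release init containers are still mirrored into a pod annotation for
// backward compatibility; SetInitContainersAnnotations performs that copy
// (assumption based on the 1.5-era API).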
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should invoke init containers on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the first init container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[1]
if status.State.Waiting == nil {
return false, fmt.Errorf("second init container should not be out of waiting: %#v", status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
}
status = t.Status.InitContainerStatuses[0]
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
}
// continue until we see an attempt to restart the failing init container
return status.LastTerminationState.Terminated != nil, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
// wait until the failing init container has been restarted at least three times
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
status := t.Status.InitContainerStatuses[0]
if status.RestartCount < 3 {
return false, nil
}
framework.Logf("init container has failed twice: %#v", t)
// TODO: more conditions
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the second init container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[0]
if status.State.Terminated == nil {
if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
}
return false, nil
}
if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 {
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
}
status = t.Status.InitContainerStatuses[1]
if status.State.Terminated == nil {
return false, nil
}
if status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("second init container should have failed: %#v", status)
}
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
conditions.PodCompleted,
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
})
})

218
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
etcHostsImageName = "gcr.io/google_containers/netexec:1.7"
etcHostsPodName = "test-pod"
etcHostsHostNetworkPodName = "test-host-network-pod"
etcHostsPartialContent = "# Kubernetes-managed hosts file."
)
type KubeletManagedHostConfig struct {
hostNetworkPod *v1.Pod
pod *v1.Pod
f *framework.Framework
}
var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts")
config := &KubeletManagedHostConfig{
f: f,
}
It("should test kubelet managed /etc/hosts file [Conformance]", func() {
By("Setting up the test")
config.setup()
By("Running the test")
config.verifyEtcHosts()
})
})
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
}
func (config *KubeletManagedHostConfig) setup() {
By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork()
By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork()
}
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = config.f.PodClient().CreateSync(podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
}
func assertManagedStatus(
config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
// TODO: workaround for https://github.com/kubernetes/kubernetes/issues/34256
//
// Retry until timeout for the contents of /etc/hosts to show
// up. Note: if /etc/hosts is properly mounted, then this will
// succeed immediately.
const retryTimeout = 30 * time.Second
retryCount := 0
etcHostsContent := ""
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
etcHostsContent = config.getEtcHostsContent(podName, name)
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
if expectedIsManaged == isManaged {
return
}
glog.Warningf(
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
retryCount++
time.Sleep(100 * time.Millisecond)
}
if expectedIsManaged {
framework.Failf(
"/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
} else {
framework.Failf(
"/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
}
}
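// A minimal sketch of the poll-until-timeout pattern used above, extracted as a
// reusable helper (illustrative only; not part of the vendored file):
func pollFor(timeout, interval time.Duration, cond func() bool) bool {
	// Re-evaluate cond every interval until it holds or the timeout elapses.
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if cond() {
			return true
		}
	}
	return false
}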
func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
}
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-3",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: "/etc/hosts",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "host-etc-hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
},
},
},
},
},
}
return pod
}
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
},
},
}
return pod
}

61
vendor/k8s.io/kubernetes/test/e2e/common/networking.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = framework.KubeDescribe("Networking", func() {
f := framework.NewDefaultFramework("pod-network-test")
framework.KubeDescribe("Granular Checks: Pods", func() {
// Try to hit all endpoints through a test container, retry 5 times,
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
It("should function for intra-pod communication: http [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for intra-pod communication: udp [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for node-pod communication: http [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for node-pod communication: udp [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
})
})

660
vendor/k8s.io/kubernetes/test/e2e/common/pods.go generated vendored Normal file

File diff suppressed because it is too large

114
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go generated vendored Normal file
View file

@ -0,0 +1,114 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
type PrivilegedPodTestConfig struct {
f *framework.Framework
privilegedPod string
privilegedContainer string
notPrivilegedContainer string
pod *v1.Pod
}
var _ = framework.KubeDescribe("PrivilegedPod", func() {
config := &PrivilegedPodTestConfig{
f: framework.NewDefaultFramework("e2e-privileged-pod"),
privilegedPod: "privileged-pod",
privilegedContainer: "privileged-container",
notPrivilegedContainer: "not-privileged-container",
}
It("should enable privileged commands", func() {
By("Creating a pod with a privileged container")
config.createPods()
By("Executing in the privileged container")
config.run(config.privilegedContainer, true)
By("Executing in the non-privileged container")
config.run(config.notPrivilegedContainer, false)
})
})
func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool) {
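// Adding a network link requires CAP_NET_ADMIN, which default (non-privileged)
// containers lack, so the same command is expected to succeed only in the
// privileged container.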
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
reverseCmd := []string{"ip", "link", "del", "dummy1"}
stdout, stderr, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, cmd...)
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
if expectSuccess {
Expect(err).NotTo(HaveOccurred(), msg)
// We need to clean up the dummy link that was created, as it
// leaks out into the node level -- yuck.
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, reverseCmd...)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("could not remove dummy1 link: %v", err))
} else {
Expect(err).To(HaveOccurred(), msg)
}
}
func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
isPrivileged := true
notPrivileged := false
const image = "gcr.io/google_containers/busybox:1.24"
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: c.privilegedPod,
Namespace: c.f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
},
},
}
}
func (c *PrivilegedPodTestConfig) createPods() {
podSpec := c.createPodsSpec()
c.pod = c.f.PodClient().CreateSync(podSpec)
}

336
vendor/k8s.io/kubernetes/test/e2e/common/secrets.go generated vendored Normal file
View file

@ -0,0 +1,336 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Secrets", func() {
f := framework.NewDefaultFramework("secrets")
It("should be consumable from pods in volume [Conformance]", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()))
})
It("should be consumable from pods in volume with defaultMode set [Conformance]", func() {
defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()))
})
It("should be consumable from pods in volume with mappings [Conformance]", func() {
doSecretE2EWithMapping(f, nil)
})
It("should be consumable from pods in volume with mappings and Item Mode set [Conformance]", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
var (
namespace2 *v1.Namespace
err error
secret2Name = "secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name)
})
It("should be consumable in multiple volumes in a pod [Conformance]", func() {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
var (
name = "secret-test-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
volumeName2 = "secret-volume-2"
volumeMountPath2 = "/etc/secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
})
})
It("should be consumable from pods in env vars [Conformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "SECRET_DATA",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
})
func secretForTest(namespace, name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
"data-2": []byte("value-2\n"),
"data-3": []byte("value-3\n"),
},
}
}
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string) {
var (
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
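// os.FileMode renders the mode symbolically: os.FileMode(0644) prints as
// "-rw-r--r--" and os.FileMode(0400) as "-r--------", matching what mounttest
// reports for the file.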
expectedOutput := []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
var (
name = "secret-test-map-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "new-path-data-1",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if mode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode
} else {
defaultItemMode := int32(0644)
mode = &defaultItemMode
}
modeString := fmt.Sprintf("%v", os.FileMode(*mode))
expectedOutput := []string{
"content of file \"/etc/secret-volume/new-path-data-1\": value-1",
"mode of file \"/etc/secret-volume/new-path-data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}

233
vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go generated vendored Normal file
View file

@ -0,0 +1,233 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Sysctls", func() {
f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient
testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
pod := v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return &pod
}
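// The specs below attach sysctls via pod annotations;
// v1.PodAnnotationsFromSysctls serializes the list as comma-separated
// "name=value" pairs, e.g. "kernel.shm_rmid_forced=1" (format assumed from the
// 1.5-era API).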
waitForPodErrorEventOrStarted := func(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
for _, e := range evnts.Items {
switch e.Reason {
case sysctl.UnsupportedReason, sysctl.ForbiddenReason:
ev = &e
return true, nil
case events.StartedContainer:
return true, nil
}
}
return false, nil
})
return ev, err
}
BeforeEach(func() {
podClient = f.PodClient()
})
It("should support sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should reject invalid sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "foo-",
Value: "bar",
},
{
Name: "kernel.shmmax",
Value: "100000000",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shmall",
Value: "100000000",
},
{
Name: "bar..",
Value: "42",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := client.Create(pod)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
Expect(err.Error()).To(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
})
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
},
})
By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
By("Checking that the pod was rejected")
Expect(ev).ToNot(BeNil())
Expect(ev.Reason).To(Equal("SysctlForbidden"))
})
})

50
vendor/k8s.io/kubernetes/test/e2e/common/util.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/kubernetes/pkg/util/sets"
)
type Suite string
const (
E2E Suite = "e2e"
NodeE2E Suite = "node e2e"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in the common tests. These images should be
// prepulled before a test starts, so that the tests won't fail due to image-pulling flakes.
// Currently, this is only used by the node e2e test.
// TODO(random-liu): Change the image puller pod to use a similar mechanism.
var CommonImageWhiteList = sets.NewString(
"gcr.io/google_containers/busybox:1.24",
"gcr.io/google_containers/eptest:0.1",
"gcr.io/google_containers/liveness:e2e",
"gcr.io/google_containers/mounttest:0.7",
"gcr.io/google_containers/mounttest-user:0.3",
"gcr.io/google_containers/netexec:1.4",
"gcr.io/google_containers/netexec:1.5",
"gcr.io/google_containers/nginx-slim:0.7",
"gcr.io/google_containers/serve_hostname:v1.4",
"gcr.io/google_containers/test-webserver:e2e",
"gcr.io/google_containers/hostexec:1.2",
"gcr.io/google_containers/volume-nfs:0.8",
"gcr.io/google_containers/volume-gluster:0.2",
)

499
vendor/k8s.io/kubernetes/test/e2e/common/volumes.go generated vendored Normal file
View file

@ -0,0 +1,499 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With a containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting a simple 'index.html' file.
* Then it uses the appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With a server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
package common
import (
"fmt"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
namespace string
// Prefix of all pods. Typically the test name.
prefix string
// Name of container image for the server pod.
serverImage string
// Ports to export from the server pod. TCP only.
serverPorts []int
// Arguments to pass to the container image.
serverArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
volumes map[string]string
}
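// For example, the NFS spec below fills this in roughly as (namespace value
// hypothetical):
//
//	config := VolumeTestConfig{
//		namespace:   "e2e-tests-gcp-volume",
//		prefix:      "nfs",
//		serverImage: "gcr.io/google_containers/volume-nfs:0.8",
//		serverPorts: []int{2049},
//	}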
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
podClient := f.PodClient()
portCount := len(config.serverPorts)
serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.prefix, i)
serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.serverPorts[i]),
Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.volumes)
volumes := make([]v1.Volume, volumeCount)
mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.volumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
mounts[i].Name = mountName
mounts[i].ReadOnly = false
mounts[i].MountPath = dst
i++
}
By(fmt.Sprint("creating ", config.prefix, " server pod"))
privileged := new(bool)
*privileged = true
serverPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
Labels: map[string]string{
"role": config.prefix + "-server",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-server",
Image: config.serverImage,
SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Args: config.serverArgs,
Ports: serverPodPorts,
VolumeMounts: mounts,
},
},
Volumes: volumes,
},
}
serverPod = podClient.CreateSync(serverPod)
By("locating the server pod")
pod, err := podClient.Get(serverPod.Name)
framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
By("sleeping a bit to give the server time to start")
time.Sleep(20 * time.Second)
return pod
}
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.prefix))
defer GinkgoRecover()
podClient := f.PodClient()
err := podClient.Delete(config.prefix+"-client", nil)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err)
framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
}
if config.serverImage != "" {
if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so client can stop and unmount")
time.Sleep(20 * time.Second)
err = podClient.Delete(config.prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
}
}
}
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
By(fmt.Sprint("starting ", config.prefix, " client"))
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-client",
Labels: map[string]string{
"role": config.prefix + "-client",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-client",
Image: "gcr.io/google_containers/busybox:1.24",
WorkingDir: "/opt",
// An imperative and easily debuggable container which reads the volume contents
// for us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test.
Command: []string{
"/bin/sh",
"-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/opt/",
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
},
},
},
}
podClient := f.PodClient()
if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
clientPod = podClient.CreateSync(clientPod)
By("Checking that text file contents are perfect.")
result := f.ExecCommandInPod(clientPod.Name, "cat", "/opt/index.html")
var err error
if !strings.Contains(result, expectedContent) {
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedContent, result)
}
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
}
}
// Insert index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.prefix, " injector"))
podClient := client.Core().Pods(config.namespace)
injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-injector",
Labels: map[string]string{
"role": config.prefix + "-injector",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-injector",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/mnt",
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
},
},
},
}
defer func() {
podClient.Delete(config.prefix+"-injector", nil)
}()
injectPod, err := podClient.Create(injectPod)
framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
Expect(err).NotTo(HaveOccurred())
}
func deleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
} else {
framework.Logf("Failed to delete volume %s: %v", name, err)
}
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
// These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = framework.KubeDescribe("GCP Volumes", func() {
f := framework.NewDefaultFramework("gcp-volume")
// If 'false', the test won't clear its volumes upon completion. Useful for debugging;
// note that namespace deletion is handled by the delete-namespace flag.
clean := true
// filled in BeforeEach
var namespace *v1.Namespace
BeforeEach(func() {
if !isTestEnabled(f.ClientSet) {
framework.Skipf("NFS tests are not supported for this distro")
}
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("NFSv4", func() {
It("should be mountable for NFSv4", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.8",
serverPorts: []int{2049},
}
defer func() {
if clean {
volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(f, config)
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
volume := v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
}
// Must match content of test/images/volumes-tester/nfs/index.html
testVolumeClient(f, config, volume, nil, "Hello from NFS!")
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("GlusterFS", func() {
It("should be mountable", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "gluster",
serverImage: "gcr.io/google_containers/volume-gluster:0.2",
serverPorts: []int{24007, 24008, 49152},
}
defer func() {
if clean {
volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(f, config)
serverIP := pod.Status.PodIP
framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server
endpoints := v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: serverIP,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
endClient := f.ClientSet.Core().Endpoints(config.namespace)
defer func() {
if clean {
endClient.Delete(config.prefix+"-server", nil)
}
}()
if _, err := endClient.Create(&endpoints); err != nil {
framework.Failf("Failed to create endpoints for Gluster server: %v", err)
}
volume := v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
}
// Must match content of test/images/volumes-tester/gluster/index.html
testVolumeClient(f, config, volume, nil, "Hello from GlusterFS!")
})
})
})
func isTestEnabled(c clientset.Interface) bool {
// Enable the test on node e2e if the node image is GCI.
nodeName := framework.TestContext.NodeName
if nodeName != "" {
if strings.Contains(nodeName, "-gci-dev-") {
gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
if len(matches) == 2 {
version, err := strconv.Atoi(matches[1])
if err != nil {
glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
return false
}
return version >= 54
}
}
return false
}
// For cluster e2e test, because nodeName is empty, retrieve the node objects from api server
// and check their images. Only run NFSv4 and GlusterFS if nodes are using GCI image for now.
nodes := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodes.Items {
if !strings.Contains(node.Status.NodeInfo.OSImage, "Google Container-VM") {
return false
}
}
return true
}