Add glide.yaml and vendor deps

commit 5b3d5e81bd (parent db918f12ad)
18880 changed files with 5166045 additions and 1 deletions
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD  (generated, vendored, new file: 89 lines)
@@ -0,0 +1,89 @@
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_test",
|
||||
"cgo_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"convert.go",
|
||||
"doc.go",
|
||||
"docker_container.go",
|
||||
"docker_image.go",
|
||||
"docker_sandbox.go",
|
||||
"docker_service.go",
|
||||
"docker_streaming.go",
|
||||
"helpers.go",
|
||||
"naming.go",
|
||||
"security_context.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/kubelet/api:go_default_library",
|
||||
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/dockershim/cm:go_default_library",
|
||||
"//pkg/kubelet/dockertools:go_default_library",
|
||||
"//pkg/kubelet/leaky:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/cni:go_default_library",
|
||||
"//pkg/kubelet/network/kubenet:go_default_library",
|
||||
"//pkg/kubelet/qos:go_default_library",
|
||||
"//pkg/kubelet/server/streaming:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/ioutils:go_default_library",
|
||||
"//pkg/securitycontext:go_default_library",
|
||||
"//pkg/util/term:go_default_library",
|
||||
"//vendor:github.com/docker/engine-api/types",
|
||||
"//vendor:github.com/docker/engine-api/types/container",
|
||||
"//vendor:github.com/docker/engine-api/types/filters",
|
||||
"//vendor:github.com/docker/engine-api/types/strslice",
|
||||
"//vendor:github.com/docker/engine-api/types/versions",
|
||||
"//vendor:github.com/docker/go-connections/nat",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/golang/protobuf/proto",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"convert_test.go",
|
||||
"docker_container_test.go",
|
||||
"docker_image_test.go",
|
||||
"docker_sandbox_test.go",
|
||||
"docker_service_test.go",
|
||||
"helpers_test.go",
|
||||
"naming_test.go",
|
||||
"security_context_test.go",
|
||||
],
|
||||
library = "go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
"//pkg/kubelet/dockertools:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/mock_network:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/securitycontext:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//vendor:github.com/docker/engine-api/types",
|
||||
"//vendor:github.com/docker/engine-api/types/container",
|
||||
"//vendor:github.com/golang/mock/gomock",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:github.com/stretchr/testify/require",
|
||||
],
|
||||
)
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD  (generated, vendored, new file: 30 lines)
@@ -0,0 +1,30 @@
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_test",
|
||||
"cgo_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"container_manager.go",
|
||||
"container_manager_linux.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/dockertools:go_default_library",
|
||||
"//pkg/kubelet/qos:go_default_library",
|
||||
"//pkg/util/wait:go_default_library",
|
||||
"//vendor:github.com/blang/semver",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/opencontainers/runc/libcontainer/cgroups/fs",
|
||||
"//vendor:github.com/opencontainers/runc/libcontainer/configs",
|
||||
],
|
||||
)
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager.go  (generated, vendored, new file: 21 lines)
@@ -0,0 +1,21 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cm
|
||||
|
||||
type ContainerManager interface {
|
||||
Start() error
|
||||
}
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go  (generated, vendored, new file: 147 lines)
@@ -0,0 +1,147 @@
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/golang/glog"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
)
|
||||
|
||||
const (
|
||||
// The percent of the machine memory capacity.
|
||||
dockerMemoryLimitThresholdPercent = kubecm.DockerMemoryLimitThresholdPercent
|
||||
|
||||
// The minimum memory limit allocated to docker container.
|
||||
minDockerMemoryLimit = kubecm.MinDockerMemoryLimit
|
||||
|
||||
// The Docker OOM score adjustment.
|
||||
dockerOOMScoreAdj = qos.DockerOOMScoreAdj
|
||||
)
|
||||
|
||||
var (
|
||||
memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
|
||||
)
|
||||
|
||||
func NewContainerManager(cgroupsName string, client dockertools.DockerInterface) ContainerManager {
|
||||
return &containerManager{
|
||||
cgroupsName: cgroupsName,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
type containerManager struct {
|
||||
// Docker client.
|
||||
client dockertools.DockerInterface
|
||||
// Name of the cgroups.
|
||||
cgroupsName string
|
||||
// Manager for the cgroups.
|
||||
cgroupsManager *fs.Manager
|
||||
}
|
||||
|
||||
func (m *containerManager) Start() error {
|
||||
// TODO: check if the required cgroups are mounted.
|
||||
if len(m.cgroupsName) != 0 {
|
||||
manager, err := createCgroupManager(m.cgroupsName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.cgroupsManager = manager
|
||||
}
|
||||
go wait.Until(m.doWork, 5*time.Minute, wait.NeverStop)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *containerManager) doWork() {
|
||||
v, err := m.client.Version()
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to get docker version: %v", err)
|
||||
return
|
||||
}
|
||||
version, err := semver.Parse(v.Version)
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to parse docker version %q: %v", v.Version, err)
|
||||
return
|
||||
}
|
||||
// EnsureDockerInContainer does two things.
|
||||
// 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil.
|
||||
// 2. Ensure processes have the OOM score applied.
|
||||
if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil {
|
||||
glog.Errorf("Unable to ensure the docker processes run in the desired containers")
|
||||
}
|
||||
}
|
||||
|
||||
func createCgroupManager(name string) (*fs.Manager, error) {
|
||||
var memoryLimit uint64
|
||||
memoryCapacity, err := getMemoryCapacity()
|
||||
if err != nil || memoryCapacity*dockerMemoryLimitThresholdPercent/100 < minDockerMemoryLimit {
|
||||
memoryLimit = minDockerMemoryLimit
|
||||
}
|
||||
glog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit)
|
||||
|
||||
allowAllDevices := true
|
||||
cm := &fs.Manager{
|
||||
Cgroups: &configs.Cgroup{
|
||||
Parent: "/",
|
||||
Name: name,
|
||||
Resources: &configs.Resources{
|
||||
Memory: int64(memoryLimit),
|
||||
MemorySwap: -1,
|
||||
AllowAllDevices: &allowAllDevices,
|
||||
},
|
||||
},
|
||||
}
|
||||
return cm, nil
|
||||
}
|
||||
|
||||
// getMemoryCapacity returns the memory capacity on the machine in bytes.
|
||||
func getMemoryCapacity() (uint64, error) {
|
||||
out, err := ioutil.ReadFile("/proc/meminfo")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return parseCapacity(out, memoryCapacityRegexp)
|
||||
}
|
||||
|
||||
// parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes.
|
||||
// Assumes that the value matched by the Regexp is in KB.
|
||||
func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) {
|
||||
matches := r.FindSubmatch(b)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("failed to match regexp in output: %q", string(b))
|
||||
}
|
||||
m, err := strconv.ParseUint(string(matches[1]), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Convert to bytes.
|
||||
return m * 1024, err
|
||||
}
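For reference, a minimal standalone sketch (not part of the vendored file; memTotalRegexp and the sample /proc/meminfo content are illustrative) of the same MemTotal parsing that getMemoryCapacity and parseCapacity above perform: match the kB value and convert it to bytes.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// memTotalRegexp mirrors memoryCapacityRegexp in the vendored file.
var memTotalRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)

func main() {
	// Example /proc/meminfo content; the matched value is in kB.
	meminfo := []byte("MemTotal:       16316308 kB\nMemFree:        1024 kB\n")
	matches := memTotalRegexp.FindSubmatch(meminfo)
	if len(matches) != 2 {
		panic("no MemTotal line found")
	}
	kb, err := strconv.ParseUint(string(matches[1]), 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("memory capacity: %d bytes\n", kb*1024) // convert kB to bytes
}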
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_unsupported.go  (generated, vendored, new file: 36 lines)
@@ -0,0 +1,36 @@
// +build !linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
)
|
||||
|
||||
type unsupportedContainerManager struct {
|
||||
}
|
||||
|
||||
func NewContainerManager(_ string, _ dockertools.DockerInterface) ContainerManager {
|
||||
return &unsupportedContainerManager{}
|
||||
}
|
||||
|
||||
func (m *unsupportedContainerManager) Start() error {
|
||||
return fmt.Errorf("Container Manager is unsupported in this build")
|
||||
}
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert.go  (generated, vendored, new file: 167 lines)
@@ -0,0 +1,167 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// This file contains helper functions to convert docker API types to runtime
|
||||
// API types, or vice versa.
|
||||
|
||||
const (
|
||||
// Status of a container returned by docker ListContainers
|
||||
statusRunningPrefix = "Up"
|
||||
statusCreatedPrefix = "Created"
|
||||
statusExitedPrefix = "Exited"
|
||||
)
|
||||
|
||||
func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeapi.Image, error) {
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
|
||||
}
|
||||
|
||||
size := uint64(image.VirtualSize)
|
||||
return &runtimeapi.Image{
|
||||
Id: &image.ID,
|
||||
RepoTags: image.RepoTags,
|
||||
RepoDigests: image.RepoDigests,
|
||||
Size_: &size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeapi.Image, error) {
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
|
||||
}
|
||||
|
||||
size := uint64(image.VirtualSize)
|
||||
runtimeImage := &runtimeapi.Image{
|
||||
Id: &image.ID,
|
||||
RepoTags: image.RepoTags,
|
||||
RepoDigests: image.RepoDigests,
|
||||
Size_: &size,
|
||||
}
|
||||
|
||||
runtimeImage.Uid, runtimeImage.Username = getUserFromImageUser(image.Config.User)
|
||||
return runtimeImage, nil
|
||||
}
|
||||
|
||||
func toPullableImageID(id string, image *dockertypes.ImageInspect) string {
|
||||
// Default to the image ID, but if RepoDigests is not empty, use
|
||||
// the first digest instead.
|
||||
imageID := DockerImageIDPrefix + id
|
||||
if len(image.RepoDigests) > 0 {
|
||||
imageID = DockerPullableImageIDPrefix + image.RepoDigests[0]
|
||||
}
|
||||
return imageID
|
||||
}
|
||||
|
||||
func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeapi.Container, error) {
|
||||
state := toRuntimeAPIContainerState(c.Status)
|
||||
if len(c.Names) == 0 {
|
||||
return nil, fmt.Errorf("unexpected empty container name: %+v", c)
|
||||
}
|
||||
metadata, err := parseContainerName(c.Names[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
labels, annotations := extractLabels(c.Labels)
|
||||
sandboxID := c.Labels[sandboxIDLabelKey]
|
||||
// The timestamp in dockertypes.Container is in seconds.
|
||||
createdAt := c.Created * int64(time.Second)
|
||||
return &runtimeapi.Container{
|
||||
Id: &c.ID,
|
||||
PodSandboxId: &sandboxID,
|
||||
Metadata: metadata,
|
||||
Image: &runtimeapi.ImageSpec{Image: &c.Image},
|
||||
ImageRef: &c.ImageID,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func toDockerContainerStatus(state runtimeapi.ContainerState) string {
|
||||
switch state {
|
||||
case runtimeapi.ContainerState_CONTAINER_CREATED:
|
||||
return "created"
|
||||
case runtimeapi.ContainerState_CONTAINER_RUNNING:
|
||||
return "running"
|
||||
case runtimeapi.ContainerState_CONTAINER_EXITED:
|
||||
return "exited"
|
||||
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
|
||||
fallthrough
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func toRuntimeAPIContainerState(state string) runtimeapi.ContainerState {
|
||||
// Parse the state string in dockertypes.Container. This could break when
|
||||
// we upgrade docker.
|
||||
switch {
|
||||
case strings.HasPrefix(state, statusRunningPrefix):
|
||||
return runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
case strings.HasPrefix(state, statusExitedPrefix):
|
||||
return runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
case strings.HasPrefix(state, statusCreatedPrefix):
|
||||
return runtimeapi.ContainerState_CONTAINER_CREATED
|
||||
default:
|
||||
return runtimeapi.ContainerState_CONTAINER_UNKNOWN
|
||||
}
|
||||
}
|
||||
|
||||
func toRuntimeAPISandboxState(state string) runtimeapi.PodSandboxState {
|
||||
// Parse the state string in dockertypes.Container. This could break when
|
||||
// we upgrade docker.
|
||||
switch {
|
||||
case strings.HasPrefix(state, statusRunningPrefix):
|
||||
return runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
default:
|
||||
return runtimeapi.PodSandboxState_SANDBOX_NOTREADY
|
||||
}
|
||||
}
|
||||
|
||||
func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeapi.PodSandbox, error) {
|
||||
state := toRuntimeAPISandboxState(c.Status)
|
||||
if len(c.Names) == 0 {
|
||||
return nil, fmt.Errorf("unexpected empty sandbox name: %+v", c)
|
||||
}
|
||||
metadata, err := parseSandboxName(c.Names[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
labels, annotations := extractLabels(c.Labels)
|
||||
// The timestamp in dockertypes.Container is in seconds.
|
||||
createdAt := c.Created * int64(time.Second)
|
||||
return &runtimeapi.PodSandbox{
|
||||
Id: &c.ID,
|
||||
Metadata: metadata,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
|
||||
}
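A small standalone sketch (hypothetical, not part of the commit) of the two conversions convert.go leans on: prefix-matching docker's human-readable status string into a coarse state, and scaling the seconds-granularity Created field to nanoseconds. The toState helper is an illustrative stand-in for toRuntimeAPIContainerState, using plain strings instead of the runtime API enum.

package main

import (
	"fmt"
	"strings"
	"time"
)

// toState mirrors the prefix matching in toRuntimeAPIContainerState.
func toState(status string) string {
	switch {
	case strings.HasPrefix(status, "Up"):
		return "RUNNING"
	case strings.HasPrefix(status, "Exited"):
		return "EXITED"
	case strings.HasPrefix(status, "Created"):
		return "CREATED"
	default:
		return "UNKNOWN"
	}
}

func main() {
	fmt.Println(toState("Up 5 hours"))             // RUNNING
	fmt.Println(toState("Exited (0) 2 hours ago")) // EXITED

	// dockertypes.Container.Created is in seconds; the runtime API expects
	// nanoseconds, hence the multiplication by int64(time.Second).
	var createdSeconds int64 = 1480000000
	createdAt := createdSeconds * int64(time.Second)
	fmt.Println(createdAt) // 1480000000000000000
}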
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert_test.go  (generated, vendored, new file: 71 lines)
@@ -0,0 +1,71 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expected runtimeapi.ContainerState
|
||||
}{
|
||||
{input: "Up 5 hours", expected: runtimeapi.ContainerState_CONTAINER_RUNNING},
|
||||
{input: "Exited (0) 2 hours ago", expected: runtimeapi.ContainerState_CONTAINER_EXITED},
|
||||
{input: "Created", expected: runtimeapi.ContainerState_CONTAINER_CREATED},
|
||||
{input: "Random string", expected: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
actual := toRuntimeAPIContainerState(test.input)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertToPullableImageID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
id string
|
||||
image *dockertypes.ImageInspect
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
id: "image-1",
|
||||
image: &dockertypes.ImageInspect{
|
||||
RepoDigests: []string{"digest-1"},
|
||||
},
|
||||
expected: DockerPullableImageIDPrefix + "digest-1",
|
||||
},
|
||||
{
|
||||
id: "image-2",
|
||||
image: &dockertypes.ImageInspect{
|
||||
RepoDigests: []string{},
|
||||
},
|
||||
expected: DockerImageIDPrefix + "image-2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
actual := toPullableImageID(test.id, test.image)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/doc.go  (generated, vendored, new file: 18 lines)
@@ -0,0 +1,18 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Docker integration using pkg/kubelet/api/v1alpha1/runtime/v1.pb.go.
|
||||
package dockershim
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go  (generated, vendored, new file: 381 lines)
@@ -0,0 +1,381 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
dockerfilters "github.com/docker/engine-api/types/filters"
|
||||
dockerstrslice "github.com/docker/engine-api/types/strslice"
|
||||
"github.com/golang/glog"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
)
|
||||
|
||||
// ListContainers lists all containers matching the filter.
|
||||
func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
|
||||
opts := dockertypes.ContainerListOptions{All: true}
|
||||
|
||||
opts.Filter = dockerfilters.NewArgs()
|
||||
f := newDockerFilter(&opts.Filter)
|
||||
// Add filter to get *only* (non-sandbox) containers.
|
||||
f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer)
|
||||
|
||||
if filter != nil {
|
||||
if filter.Id != nil {
|
||||
f.Add("id", filter.GetId())
|
||||
}
|
||||
if filter.State != nil {
|
||||
f.Add("status", toDockerContainerStatus(filter.GetState()))
|
||||
}
|
||||
if filter.PodSandboxId != nil {
|
||||
f.AddLabel(sandboxIDLabelKey, *filter.PodSandboxId)
|
||||
}
|
||||
|
||||
if filter.LabelSelector != nil {
|
||||
for k, v := range filter.LabelSelector {
|
||||
f.AddLabel(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
containers, err := ds.client.ListContainers(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Convert docker to runtime api containers.
|
||||
result := []*runtimeapi.Container{}
|
||||
for i := range containers {
|
||||
c := containers[i]
|
||||
|
||||
converted, err := toRuntimeAPIContainer(&c)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to convert docker to runtime API container: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, converted)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container in the given PodSandbox
|
||||
// Docker cannot store the log to an arbitrary location (yet), so we create a
// symlink at LogPath, linking to the actual path of the log.
|
||||
// TODO: check if the default values returned by the runtime API are ok.
|
||||
func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
|
||||
if config == nil {
|
||||
return "", fmt.Errorf("container config is nil")
|
||||
}
|
||||
if sandboxConfig == nil {
|
||||
return "", fmt.Errorf("sandbox config is nil for container %q", config.Metadata.GetName())
|
||||
}
|
||||
|
||||
labels := makeLabels(config.GetLabels(), config.GetAnnotations())
|
||||
// Apply the container type label.
|
||||
labels[containerTypeLabelKey] = containerTypeLabelContainer
|
||||
// Write the container log path in the labels.
|
||||
labels[containerLogPathLabelKey] = filepath.Join(sandboxConfig.GetLogDirectory(), config.GetLogPath())
|
||||
// Write the sandbox ID in the labels.
|
||||
labels[sandboxIDLabelKey] = podSandboxID
|
||||
|
||||
image := ""
|
||||
if iSpec := config.GetImage(); iSpec != nil {
|
||||
image = iSpec.GetImage()
|
||||
}
|
||||
createConfig := dockertypes.ContainerCreateConfig{
|
||||
Name: makeContainerName(sandboxConfig, config),
|
||||
Config: &dockercontainer.Config{
|
||||
// TODO: set User.
|
||||
Entrypoint: dockerstrslice.StrSlice(config.GetCommand()),
|
||||
Cmd: dockerstrslice.StrSlice(config.GetArgs()),
|
||||
Env: generateEnvList(config.GetEnvs()),
|
||||
Image: image,
|
||||
WorkingDir: config.GetWorkingDir(),
|
||||
Labels: labels,
|
||||
// Interactive containers:
|
||||
OpenStdin: config.GetStdin(),
|
||||
StdinOnce: config.GetStdinOnce(),
|
||||
Tty: config.GetTty(),
|
||||
},
|
||||
}
|
||||
|
||||
// Fill the HostConfig.
|
||||
hc := &dockercontainer.HostConfig{
|
||||
Binds: generateMountBindings(config.GetMounts()),
|
||||
}
|
||||
|
||||
// Apply cgroupsParent derived from the sandbox config.
|
||||
if lc := sandboxConfig.GetLinux(); lc != nil {
|
||||
// Apply Cgroup options.
|
||||
// TODO: Check if this works with per-pod cgroups.
|
||||
// TODO: we need to pass the cgroup in the syntax expected by the cgroup driver, but the shim does not use docker info yet...
|
||||
hc.CgroupParent = lc.GetCgroupParent()
|
||||
}
|
||||
|
||||
// Apply Linux-specific options if applicable.
|
||||
if lc := config.GetLinux(); lc != nil {
|
||||
// Apply resource options.
|
||||
// TODO: Check if the units are correct.
|
||||
// TODO: Can we assume the defaults are sane?
|
||||
rOpts := lc.GetResources()
|
||||
if rOpts != nil {
|
||||
hc.Resources = dockercontainer.Resources{
|
||||
Memory: rOpts.GetMemoryLimitInBytes(),
|
||||
MemorySwap: -1,
|
||||
CPUShares: rOpts.GetCpuShares(),
|
||||
CPUQuota: rOpts.GetCpuQuota(),
|
||||
CPUPeriod: rOpts.GetCpuPeriod(),
|
||||
}
|
||||
hc.OomScoreAdj = int(rOpts.GetOomScoreAdj())
|
||||
}
|
||||
// Note: ShmSize is handled in kube_docker_client.go
|
||||
|
||||
// Apply security context.
|
||||
applyContainerSecurityContext(lc, podSandboxID, createConfig.Config, hc)
|
||||
}
|
||||
|
||||
// Set devices for container.
|
||||
devices := make([]dockercontainer.DeviceMapping, len(config.Devices))
|
||||
for i, device := range config.Devices {
|
||||
devices[i] = dockercontainer.DeviceMapping{
|
||||
PathOnHost: device.GetHostPath(),
|
||||
PathInContainer: device.GetContainerPath(),
|
||||
CgroupPermissions: device.GetPermissions(),
|
||||
}
|
||||
}
|
||||
hc.Resources.Devices = devices
|
||||
|
||||
// Apply appArmor and seccomp options.
|
||||
securityOpts, err := getContainerSecurityOpts(config.Metadata.GetName(), sandboxConfig, ds.seccompProfileRoot)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate container security options for container %q: %v", config.Metadata.GetName(), err)
|
||||
}
|
||||
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
|
||||
|
||||
createConfig.HostConfig = hc
|
||||
createResp, err := ds.client.CreateContainer(createConfig)
|
||||
recoverFromConflictIfNeeded(ds.client, err)
|
||||
|
||||
if createResp != nil {
|
||||
return createResp.ID, err
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// getContainerLogPath returns the container log path specified by kubelet and the real
|
||||
// path where docker stores the container log.
|
||||
func (ds *dockerService) getContainerLogPath(containerID string) (string, string, error) {
|
||||
info, err := ds.client.InspectContainer(containerID)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to inspect container %q: %v", containerID, err)
|
||||
}
|
||||
return info.Config.Labels[containerLogPathLabelKey], info.LogPath, nil
|
||||
}
|
||||
|
||||
// createContainerLogSymlink creates the symlink for docker container log.
|
||||
func (ds *dockerService) createContainerLogSymlink(containerID string) error {
|
||||
path, realPath, err := ds.getContainerLogPath(containerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container %q log path: %v", containerID, err)
|
||||
}
|
||||
if path != "" {
|
||||
// Only create the symlink when container log path is specified.
|
||||
if err = ds.os.Symlink(realPath, path); err != nil {
|
||||
return fmt.Errorf("failed to create symbolic link %q to the container log file %q for container %q: %v",
|
||||
path, realPath, containerID, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeContainerLogSymlink removes the symlink for docker container log.
|
||||
func (ds *dockerService) removeContainerLogSymlink(containerID string) error {
|
||||
path, _, err := ds.getContainerLogPath(containerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container %q log path: %v", containerID, err)
|
||||
}
|
||||
if path != "" {
|
||||
// Only remove the symlink when container log path is specified.
|
||||
err := ds.os.Remove(path)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to remove container %q log symlink %q: %v", containerID, path, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartContainer starts the container.
|
||||
func (ds *dockerService) StartContainer(containerID string) error {
|
||||
err := ds.client.StartContainer(containerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start container %q: %v", containerID, err)
|
||||
}
|
||||
// Create container log symlink.
|
||||
if err := ds.createContainerLogSymlink(containerID); err != nil {
|
||||
// Do not stop the container if we failed to create the symlink because:
// 1. This is not a critical failure.
// 2. We don't have enough information to properly stop the container here.
// Kubelet will surface this error to the user via an event.
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopContainer stops a running container with a grace period (i.e., timeout).
|
||||
func (ds *dockerService) StopContainer(containerID string, timeout int64) error {
|
||||
return ds.client.StopContainer(containerID, int(timeout))
|
||||
}
|
||||
|
||||
// RemoveContainer removes the container.
|
||||
// TODO: If a container is still running, should we forcibly remove it?
|
||||
func (ds *dockerService) RemoveContainer(containerID string) error {
|
||||
// Ideally, log lifecycle should be independent of container lifecycle.
|
||||
// However, docker will remove the container log after the container is removed;
// we can't prevent that now, so we also clean up the symlink here.
|
||||
err := ds.removeContainerLogSymlink(containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ds.client.RemoveContainer(containerID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove container %q: %v", containerID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time, time.Time, error) {
|
||||
var createdAt, startedAt, finishedAt time.Time
|
||||
var err error
|
||||
|
||||
createdAt, err = dockertools.ParseDockerTimestamp(r.Created)
|
||||
if err != nil {
|
||||
return createdAt, startedAt, finishedAt, err
|
||||
}
|
||||
startedAt, err = dockertools.ParseDockerTimestamp(r.State.StartedAt)
|
||||
if err != nil {
|
||||
return createdAt, startedAt, finishedAt, err
|
||||
}
|
||||
finishedAt, err = dockertools.ParseDockerTimestamp(r.State.FinishedAt)
|
||||
if err != nil {
|
||||
return createdAt, startedAt, finishedAt, err
|
||||
}
|
||||
return createdAt, startedAt, finishedAt, nil
|
||||
}
|
||||
|
||||
// ContainerStatus inspects the docker container and returns the status.
|
||||
func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
|
||||
r, err := ds.client.InspectContainer(containerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse the timestamps.
|
||||
createdAt, startedAt, finishedAt, err := getContainerTimestamps(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse timestamp for container %q: %v", containerID, err)
|
||||
}
|
||||
|
||||
// Convert the image id to a pullable id.
|
||||
ir, err := ds.client.InspectImageByID(r.Image)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to inspect docker image %q while inspecting docker container %q: %v", r.Image, containerID, err)
|
||||
}
|
||||
imageID := toPullableImageID(r.Image, ir)
|
||||
|
||||
// Convert the mounts.
|
||||
mounts := []*runtimeapi.Mount{}
|
||||
for i := range r.Mounts {
|
||||
m := r.Mounts[i]
|
||||
readonly := !m.RW
|
||||
mounts = append(mounts, &runtimeapi.Mount{
|
||||
HostPath: &m.Source,
|
||||
ContainerPath: &m.Destination,
|
||||
Readonly: &readonly,
|
||||
// Note: Can't set SeLinuxRelabel
|
||||
})
|
||||
}
|
||||
// Interpret container states.
|
||||
var state runtimeapi.ContainerState
|
||||
var reason, message string
|
||||
if r.State.Running {
|
||||
// Container is running.
|
||||
state = runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
} else {
|
||||
// Container is *not* running. We need to get more details.
|
||||
// * Case 1: container has run and exited with non-zero finishedAt
|
||||
// time.
|
||||
// * Case 2: container has failed to start; it has a zero finishedAt
|
||||
// time, but a non-zero exit code.
|
||||
// * Case 3: container has been created, but not started (yet).
|
||||
if !finishedAt.IsZero() { // Case 1
|
||||
state = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
switch {
|
||||
case r.State.OOMKilled:
|
||||
// TODO: consider exposing OOMKilled via the runtimeAPI.
|
||||
// Note: if an application handles OOMKilled gracefully, the
|
||||
// exit code could be zero.
|
||||
reason = "OOMKilled"
|
||||
case r.State.ExitCode == 0:
|
||||
reason = "Completed"
|
||||
default:
|
||||
reason = "Error"
|
||||
}
|
||||
} else if r.State.ExitCode != 0 { // Case 2
|
||||
state = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
// Adjust finishedAt and startedAt time to createdAt time to avoid
// confusion.
|
||||
finishedAt, startedAt = createdAt, createdAt
|
||||
reason = "ContainerCannotRun"
|
||||
} else { // Case 3
|
||||
state = runtimeapi.ContainerState_CONTAINER_CREATED
|
||||
}
|
||||
message = r.State.Error
|
||||
}
|
||||
|
||||
// Convert to unix timestamps.
|
||||
ct, st, ft := createdAt.UnixNano(), startedAt.UnixNano(), finishedAt.UnixNano()
|
||||
exitCode := int32(r.State.ExitCode)
|
||||
|
||||
metadata, err := parseContainerName(r.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
labels, annotations := extractLabels(r.Config.Labels)
|
||||
return &runtimeapi.ContainerStatus{
|
||||
Id: &r.ID,
|
||||
Metadata: metadata,
|
||||
Image: &runtimeapi.ImageSpec{Image: &r.Config.Image},
|
||||
ImageRef: &imageID,
|
||||
Mounts: mounts,
|
||||
ExitCode: &exitCode,
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
StartedAt: &st,
|
||||
FinishedAt: &ft,
|
||||
Reason: &reason,
|
||||
Message: &message,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
|
||||
}
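The state interpretation in ContainerStatus above can be summarized with the following standalone sketch (illustrative only; it omits the OOMKilled branch and uses plain strings instead of the runtime API enum):

package main

import (
	"fmt"
	"time"
)

// interpret mirrors the case analysis in ContainerStatus: a non-running
// container has either run and exited, failed to start, or merely been created.
func interpret(running bool, exitCode int, finishedAt time.Time) (state, reason string) {
	if running {
		return "RUNNING", ""
	}
	switch {
	case !finishedAt.IsZero(): // Case 1: ran and finished
		if exitCode == 0 {
			return "EXITED", "Completed"
		}
		return "EXITED", "Error"
	case exitCode != 0: // Case 2: no finish time, but a non-zero exit code
		return "EXITED", "ContainerCannotRun"
	default: // Case 3: created but never started
		return "CREATED", ""
	}
}

func main() {
	fmt.Println(interpret(false, 0, time.Now()))  // EXITED Completed
	fmt.Println(interpret(false, 1, time.Time{})) // EXITED ContainerCannotRun
	fmt.Println(interpret(false, 0, time.Time{})) // CREATED
}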
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container_test.go  (generated, vendored, new file: 217 lines)
@@ -0,0 +1,217 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
)
|
||||
|
||||
// A helper to create a basic config.
|
||||
func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeapi.ContainerConfig {
|
||||
return &runtimeapi.ContainerConfig{
|
||||
Metadata: &runtimeapi.ContainerMetadata{
|
||||
Name: &name,
|
||||
Attempt: &attempt,
|
||||
},
|
||||
Image: &runtimeapi.ImageSpec{Image: &image},
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}
|
||||
}
|
||||
|
||||
// TestListContainers creates several containers and then lists them to check
// whether the correct metadata, states, and labels are returned.
|
||||
func TestListContainers(t *testing.T) {
|
||||
ds, _, _ := newTestDockerService()
|
||||
podName, namespace := "foo", "bar"
|
||||
containerName, image := "sidecar", "logger"
|
||||
|
||||
configs := []*runtimeapi.ContainerConfig{}
|
||||
sConfigs := []*runtimeapi.PodSandboxConfig{}
|
||||
for i := 0; i < 3; i++ {
|
||||
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
|
||||
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
|
||||
labels := map[string]string{"abc.xyz": fmt.Sprintf("label%d", i)}
|
||||
annotations := map[string]string{"foo.bar.baz": fmt.Sprintf("annotation%d", i)}
|
||||
c := makeContainerConfig(s, fmt.Sprintf("%s%d", containerName, i),
|
||||
fmt.Sprintf("%s:v%d", image, i), uint32(i), labels, annotations)
|
||||
sConfigs = append(sConfigs, s)
|
||||
configs = append(configs, c)
|
||||
}
|
||||
|
||||
expected := []*runtimeapi.Container{}
|
||||
state := runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
var createdAt int64 = 0
|
||||
for i := range configs {
|
||||
// We don't care about the sandbox id; pass a bogus one.
|
||||
sandboxID := fmt.Sprintf("sandboxid%d", i)
|
||||
id, err := ds.CreateContainer(sandboxID, configs[i], sConfigs[i])
|
||||
assert.NoError(t, err)
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
imageRef := "" // FakeDockerClient doesn't populate ImageRef yet.
|
||||
// Prepend to the expected list because ListContainers returns
|
||||
// the most recent containers first.
|
||||
expected = append([]*runtimeapi.Container{{
|
||||
Metadata: configs[i].Metadata,
|
||||
Id: &id,
|
||||
PodSandboxId: &sandboxID,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Image: configs[i].Image,
|
||||
ImageRef: &imageRef,
|
||||
Labels: configs[i].Labels,
|
||||
Annotations: configs[i].Annotations,
|
||||
}}, expected...)
|
||||
}
|
||||
containers, err := ds.ListContainers(nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, containers, len(expected))
|
||||
assert.Equal(t, expected, containers)
|
||||
}
|
||||
|
||||
// TestContainerStatus tests the basic lifecycle operations and verifies that
|
||||
// the status returned reflects the operations performed.
|
||||
func TestContainerStatus(t *testing.T) {
|
||||
ds, fDocker, fClock := newTestDockerService()
|
||||
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
|
||||
labels := map[string]string{"abc.xyz": "foo"}
|
||||
annotations := map[string]string{"foo.bar.baz": "abc"}
|
||||
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, labels, annotations)
|
||||
|
||||
var defaultTime time.Time
|
||||
dt := defaultTime.UnixNano()
|
||||
ct, st, ft := dt, dt, dt
|
||||
state := runtimeapi.ContainerState_CONTAINER_CREATED
|
||||
// The following variables are not set in FakeDockerClient.
|
||||
imageRef := DockerImageIDPrefix + ""
|
||||
exitCode := int32(0)
|
||||
var reason, message string
|
||||
|
||||
expected := &runtimeapi.ContainerStatus{
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
StartedAt: &st,
|
||||
FinishedAt: &ft,
|
||||
Metadata: config.Metadata,
|
||||
Image: config.Image,
|
||||
ImageRef: &imageRef,
|
||||
ExitCode: &exitCode,
|
||||
Reason: &reason,
|
||||
Message: &message,
|
||||
Mounts: []*runtimeapi.Mount{},
|
||||
Labels: config.Labels,
|
||||
Annotations: config.Annotations,
|
||||
}
|
||||
|
||||
// Create the container.
|
||||
fClock.SetTime(time.Now().Add(-1 * time.Hour))
|
||||
*expected.CreatedAt = fClock.Now().UnixNano()
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
|
||||
// Check internal labels
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)
|
||||
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxId)
|
||||
|
||||
// Set the id manually since we don't know the id until it's created.
|
||||
expected.Id = &id
|
||||
assert.NoError(t, err)
|
||||
status, err := ds.ContainerStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
// Advance the clock and start the container.
|
||||
fClock.SetTime(time.Now())
|
||||
*expected.StartedAt = fClock.Now().UnixNano()
|
||||
*expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
// Advance the clock and stop the container.
|
||||
fClock.SetTime(time.Now().Add(1 * time.Hour))
|
||||
*expected.FinishedAt = fClock.Now().UnixNano()
|
||||
*expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
*expected.Reason = "Completed"
|
||||
|
||||
err = ds.StopContainer(id, 0)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
// Remove the container.
|
||||
err = ds.RemoveContainer(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Error(t, err, fmt.Sprintf("status of container: %+v", status))
|
||||
}
|
||||
|
||||
// TestContainerLogPath tests the container log creation logic.
|
||||
func TestContainerLogPath(t *testing.T) {
|
||||
ds, fDocker, _ := newTestDockerService()
|
||||
podLogPath := "/pod/1"
|
||||
containerLogPath := "0"
|
||||
kubeletContainerLogPath := filepath.Join(podLogPath, containerLogPath)
|
||||
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
|
||||
sConfig.LogDirectory = &podLogPath
|
||||
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil)
|
||||
config.LogPath = &containerLogPath
|
||||
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
|
||||
// Check internal container log label
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, c.Config.Labels[containerLogPathLabelKey], kubeletContainerLogPath)
|
||||
|
||||
// Set docker container log path
|
||||
dockerContainerLogPath := "/docker/container/log"
|
||||
c.LogPath = dockerContainerLogPath
|
||||
|
||||
// Verify container log symlink creation
|
||||
fakeOS := ds.os.(*containertest.FakeOS)
|
||||
fakeOS.SymlinkFn = func(oldname, newname string) error {
|
||||
assert.Equal(t, dockerContainerLogPath, oldname)
|
||||
assert.Equal(t, kubeletContainerLogPath, newname)
|
||||
return nil
|
||||
}
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = ds.StopContainer(id, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify container log symlink deletion
|
||||
err = ds.RemoveContainer(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, fakeOS.Removes, []string{kubeletContainerLogPath})
|
||||
}
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go  (generated, vendored, new file: 96 lines)
@@ -0,0 +1,96 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
)
|
||||
|
||||
// This file implements methods in ImageManagerService.
|
||||
|
||||
// ListImages lists existing images.
|
||||
func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
|
||||
opts := dockertypes.ImageListOptions{}
|
||||
if filter != nil {
|
||||
if imgSpec := filter.GetImage(); imgSpec != nil {
|
||||
opts.MatchName = imgSpec.GetImage()
|
||||
}
|
||||
}
|
||||
|
||||
images, err := ds.client.ListImages(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := []*runtimeapi.Image{}
|
||||
for _, i := range images {
|
||||
apiImage, err := imageToRuntimeAPIImage(&i)
|
||||
if err != nil {
|
||||
// TODO: log an error message?
|
||||
continue
|
||||
}
|
||||
result = append(result, apiImage)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ImageStatus returns the status of the image; it returns nil if the image is not present.
|
||||
func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
|
||||
imageInspect, err := ds.client.InspectImageByRef(image.GetImage())
|
||||
if err != nil {
|
||||
if dockertools.IsImageNotFoundError(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return imageInspectToRuntimeAPIImage(imageInspect)
|
||||
}
|
||||
|
||||
// PullImage pulls an image with authentication config.
|
||||
func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
|
||||
return ds.client.PullImage(image.GetImage(),
|
||||
dockertypes.AuthConfig{
|
||||
Username: auth.GetUsername(),
|
||||
Password: auth.GetPassword(),
|
||||
ServerAddress: auth.GetServerAddress(),
|
||||
IdentityToken: auth.GetIdentityToken(),
|
||||
RegistryToken: auth.GetRegistryToken(),
|
||||
},
|
||||
dockertypes.ImagePullOptions{},
|
||||
)
|
||||
}
|
||||
|
||||
// RemoveImage removes the image.
|
||||
func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
|
||||
// If the image has multiple tags, we need to remove all the tags
|
||||
// TODO: We assume image.Image is image ID here, which is true in the current implementation
|
||||
// of kubelet, but we should still clarify this in CRI.
|
||||
imageInspect, err := ds.client.InspectImageByID(image.GetImage())
|
||||
if err == nil && imageInspect != nil && len(imageInspect.RepoTags) > 1 {
|
||||
for _, tag := range imageInspect.RepoTags {
|
||||
if _, err := ds.client.RemoveImage(tag, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = ds.client.RemoveImage(image.GetImage(), dockertypes.ImageRemoveOptions{PruneChildren: true})
|
||||
return err
|
||||
}
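A rough standalone sketch (not the vendored code; removeImage and the toy remove callback are illustrative) of the multi-tag policy RemoveImage follows: an image carrying more than one tag is untagged tag by tag, otherwise it is removed by the given reference.

package main

import "fmt"

// removeImage mimics the vendored RemoveImage policy with a toy client.
func removeImage(ref string, repoTags []string, remove func(string) error) error {
	if len(repoTags) > 1 {
		for _, tag := range repoTags {
			if err := remove(tag); err != nil {
				return err
			}
		}
		return nil
	}
	return remove(ref)
}

func main() {
	logRemove := func(ref string) error {
		fmt.Println("remove_image", ref)
		return nil
	}
	// Two tags: both are removed individually.
	removeImage("1111", []string{"foo", "bar"}, logRemove)
	// Single tag: removed by the original reference.
	removeImage("2222", []string{"foo"}, logRemove)
}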
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_test.go  (generated, vendored, new file: 45 lines)
@@ -0,0 +1,45 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
)
|
||||
|
||||
func TestRemoveImage(t *testing.T) {
|
||||
ds, fakeDocker, _ := newTestDockerService()
|
||||
id := "1111"
|
||||
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo"}}
|
||||
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
|
||||
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
|
||||
dockertools.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
|
||||
}
|
||||
|
||||
func TestRemoveImageWithMultipleTags(t *testing.T) {
|
||||
ds, fakeDocker, _ := newTestDockerService()
|
||||
id := "1111"
|
||||
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo", "bar"}}
|
||||
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
|
||||
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
|
||||
dockertools.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
|
||||
dockertools.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))
|
||||
}
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go  (generated, vendored, new file: 379 lines)
@@ -0,0 +1,379 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
dockerfilters "github.com/docker/engine-api/types/filters"
|
||||
"github.com/golang/glog"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.0"
|
||||
|
||||
// Various default sandbox resource requests/limits.
|
||||
defaultSandboxCPUshares int64 = 2
|
||||
|
||||
// Termination grace period
|
||||
defaultSandboxGracePeriod int = 10
|
||||
|
||||
// Name of the underlying container runtime
|
||||
runtimeName = "docker"
|
||||
)
|
||||
|
||||
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
|
||||
// the sandbox is in ready state.
|
||||
// For docker, PodSandbox is implemented by a container holding the network
|
||||
// namespace for the pod.
|
||||
// Note: docker doesn't use LogDirectory (yet).
|
||||
func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
|
||||
// Step 1: Pull the image for the sandbox.
|
||||
image := defaultSandboxImage
|
||||
podSandboxImage := ds.podSandboxImage
|
||||
if len(podSandboxImage) != 0 {
|
||||
image = podSandboxImage
|
||||
}
|
||||
|
||||
// NOTE: To use a custom sandbox image in a private repository, users need to configure the nodes with credentials properly.
|
||||
// see: http://kubernetes.io/docs/user-guide/images/#configuring-nodes-to-authenticate-to-a-private-repository
|
||||
if err := ds.client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{}); err != nil {
|
||||
return "", fmt.Errorf("unable to pull image for the sandbox container: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Create the sandbox container.
|
||||
createConfig, err := ds.makeSandboxDockerConfig(config, image)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.GetName(), err)
|
||||
}
|
||||
createResp, err := ds.client.CreateContainer(*createConfig)
|
||||
recoverFromConflictIfNeeded(ds.client, err)
|
||||
|
||||
if err != nil || createResp == nil {
|
||||
return "", fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.GetName(), err)
|
||||
}
|
||||
|
||||
// Step 3: Start the sandbox container.
|
||||
// Assume kubelet's garbage collector would remove the sandbox later, if
|
||||
// startContainer failed.
|
||||
err = ds.client.StartContainer(createResp.ID)
|
||||
if err != nil {
|
||||
return createResp.ID, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.GetName(), err)
|
||||
}
|
||||
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
|
||||
return createResp.ID, nil
|
||||
}
|
||||
|
||||
// Step 4: Setup networking for the sandbox.
|
||||
// All pod networking is setup by a CNI plugin discovered at startup time.
|
||||
// This plugin assigns the pod ip, sets up routes inside the sandbox,
|
||||
// creates interfaces etc. In theory, its jurisdiction ends with pod
|
||||
// sandbox networking, but it might insert iptables rules or open ports
|
||||
// on the host as well, to satisfy parts of the pod spec that aren't
|
||||
// recognized by the CNI standard yet.
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
|
||||
err = ds.networkPlugin.SetUpPod(config.GetMetadata().GetNamespace(), config.GetMetadata().GetName(), cID)
|
||||
// TODO: Do we need to teardown on failure or can we rely on a StopPodSandbox call with the given ID?
|
||||
return createResp.ID, err
|
||||
}
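// Illustrative sketch (editor's addition, not part of the original commit): a
// minimal way a caller could exercise RunPodSandbox. The concrete name,
// namespace and uid values are made up; the pointer-heavy field style mirrors
// the v1alpha1 runtime API used throughout this file.
func exampleRunPodSandbox(ds *dockerService) (string, error) {
	name, namespace, uid := "nginx", "default", "12345"
	attempt := uint32(0)
	config := &runtimeapi.PodSandboxConfig{
		Metadata: &runtimeapi.PodSandboxMetadata{
			Name:      &name,
			Namespace: &namespace,
			Uid:       &uid,
			Attempt:   &attempt,
		},
	}
	// On success the returned string is the ID of the docker container that
	// backs the sandbox.
	return ds.RunPodSandbox(config)
}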
|
||||
|
||||
// StopPodSandbox stops the sandbox. If there are any running containers in the
|
||||
// sandbox, they should be force terminated.
|
||||
// TODO: This function blocks sandbox teardown on networking teardown. Is it
|
||||
// better to cut our losses assuming an out of band GC routine will cleanup
|
||||
// after us?
|
||||
func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
|
||||
status, err := ds.PodSandboxStatus(podSandboxID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get sandbox status: %v", err)
|
||||
}
|
||||
if !status.GetLinux().GetNamespaces().GetOptions().GetHostNetwork() {
|
||||
m := status.GetMetadata()
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
|
||||
if err := ds.networkPlugin.TearDownPod(m.GetNamespace(), m.GetName(), cID); err != nil {
|
||||
// TODO: Figure out a way to retry this error. We can't
|
||||
// right now because the plugin throws errors when it doesn't find
|
||||
// eth0, which might not exist for various reasons (setup failed,
|
||||
// conf changed etc). In theory, it should teardown everything else
|
||||
// so there's no need to retry.
|
||||
glog.Errorf("Failed to teardown sandbox %v for pod %v/%v: %v", m.GetNamespace(), m.GetName(), podSandboxID, err)
|
||||
}
|
||||
}
|
||||
return ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod)
|
||||
// TODO: Stop all running containers in the sandbox.
|
||||
}
|
||||
|
||||
// RemovePodSandbox removes the sandbox. If there are running containers in the
|
||||
// sandbox, they should be forcibly removed.
|
||||
func (ds *dockerService) RemovePodSandbox(podSandboxID string) error {
|
||||
return ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
// TODO: remove all containers in the sandbox.
|
||||
}
|
||||
|
||||
// getIPFromPlugin interrogates the network plugin for an IP.
|
||||
func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (string, error) {
|
||||
metadata, err := parseSandboxName(sandbox.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", *metadata.Namespace, *metadata.Name)
|
||||
if sharesHostNetwork(sandbox) {
|
||||
return "", fmt.Errorf("%v: not responsible for host-network sandboxes", msg)
|
||||
}
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
|
||||
networkStatus, err := ds.networkPlugin.GetPodNetworkStatus(*metadata.Namespace, *metadata.Name, cID)
|
||||
if err != nil {
|
||||
// This might be a sandbox that somehow ended up without a default
|
||||
// interface (eth0). We can't distinguish this from a more serious
|
||||
// error, so callers should probably treat it as non-fatal.
|
||||
return "", fmt.Errorf("%v: %v", msg, err)
|
||||
}
|
||||
if networkStatus == nil {
|
||||
return "", fmt.Errorf("%v: invalid network status for", msg)
|
||||
}
|
||||
return networkStatus.IP.String(), nil
|
||||
}
|
||||
|
||||
// getIP returns the ip given the output of `docker inspect` on a pod sandbox,
|
||||
// first interrogating any registered plugins, then simply trusting the ip
|
||||
// in the sandbox itself. We look for an ipv4 address before ipv6.
|
||||
func (ds *dockerService) getIP(sandbox *dockertypes.ContainerJSON) (string, error) {
|
||||
if sandbox.NetworkSettings == nil {
|
||||
return "", nil
|
||||
}
|
||||
if IP, err := ds.getIPFromPlugin(sandbox); err != nil {
|
||||
glog.Warningf("%v", err)
|
||||
} else if IP != "" {
|
||||
return IP, nil
|
||||
}
|
||||
// TODO: trusting the docker ip is not a great idea. However docker uses
|
||||
// eth0 by default and so does CNI, so if we find a docker IP here, we
|
||||
// conclude that the plugin must have failed setup, or forgotten its ip.
|
||||
// This is not a sensible assumption for plugins across the board, but if
|
||||
// a plugin doesn't want this behavior, it can throw an error.
|
||||
if sandbox.NetworkSettings.IPAddress != "" {
|
||||
return sandbox.NetworkSettings.IPAddress, nil
|
||||
}
|
||||
return sandbox.NetworkSettings.GlobalIPv6Address, nil
|
||||
}
|
||||
|
||||
// PodSandboxStatus returns the status of the PodSandbox.
|
||||
func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
|
||||
// Inspect the container.
|
||||
r, err := ds.client.InspectContainer(podSandboxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse the timestamps.
|
||||
createdAt, _, _, err := getContainerTimestamps(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse timestamp for container %q: %v", podSandboxID, err)
|
||||
}
|
||||
ct := createdAt.UnixNano()
|
||||
|
||||
// Translate container to sandbox state.
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
|
||||
if r.State.Running {
|
||||
state = runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
}
|
||||
IP, err := ds.getIP(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
network := &runtimeapi.PodSandboxNetworkStatus{Ip: &IP}
|
||||
netNS := getNetworkNamespace(r)
|
||||
|
||||
metadata, err := parseSandboxName(r.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hostNetwork := sharesHostNetwork(r)
|
||||
labels, annotations := extractLabels(r.Config.Labels)
|
||||
return &runtimeapi.PodSandboxStatus{
|
||||
Id: &r.ID,
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
Metadata: metadata,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
Network: network,
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{
|
||||
Namespaces: &runtimeapi.Namespace{
|
||||
Network: &netNS,
|
||||
Options: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: &hostNetwork,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListPodSandbox returns a list of Sandbox.
|
||||
func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
|
||||
// By default, list all containers whether they are running or not.
|
||||
opts := dockertypes.ContainerListOptions{All: true}
|
||||
filterOutReadySandboxes := false
|
||||
|
||||
opts.Filter = dockerfilters.NewArgs()
|
||||
f := newDockerFilter(&opts.Filter)
|
||||
// Add filter to select only sandbox containers.
|
||||
f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox)
|
||||
|
||||
if filter != nil {
|
||||
if filter.Id != nil {
|
||||
f.Add("id", filter.GetId())
|
||||
}
|
||||
if filter.State != nil {
|
||||
if filter.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
// Only list running containers.
|
||||
opts.All = false
|
||||
} else {
|
||||
// runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
|
||||
// container is in any of the non-running state (e.g., created,
|
||||
// exited). We can't tell docker to filter out running
|
||||
// containers directly, so we'll need to filter them out
|
||||
// ourselves after getting the results.
|
||||
filterOutReadySandboxes = true
|
||||
}
|
||||
}
|
||||
|
||||
if filter.LabelSelector != nil {
|
||||
for k, v := range filter.LabelSelector {
|
||||
f.AddLabel(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
containers, err := ds.client.ListContainers(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert docker containers to runtime api sandboxes.
|
||||
result := []*runtimeapi.PodSandbox{}
|
||||
for i := range containers {
|
||||
c := containers[i]
|
||||
converted, err := toRuntimeAPISandbox(&c)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err)
|
||||
continue
|
||||
}
|
||||
if filterOutReadySandboxes && converted.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, converted)
|
||||
}
|
||||
return result, nil
|
||||
}
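// Illustrative sketch (editor's addition, not part of the original commit):
// listing only sandboxes that are currently in the READY state by passing a
// PodSandboxFilter with its State field set.
func exampleListReadySandboxes(ds *dockerService) ([]*runtimeapi.PodSandbox, error) {
	ready := runtimeapi.PodSandboxState_SANDBOX_READY
	return ds.ListPodSandbox(&runtimeapi.PodSandboxFilter{State: &ready})
}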
|
||||
|
||||
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
|
||||
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
|
||||
// Apply Cgroup options.
|
||||
// TODO: Check if this works with per-pod cgroups.
|
||||
hc.CgroupParent = lc.GetCgroupParent()
|
||||
// Apply security context.
|
||||
applySandboxSecurityContext(lc, createConfig.Config, hc)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeapi.PodSandboxConfig.
|
||||
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
|
||||
// Merge annotations and labels because docker supports only labels.
|
||||
labels := makeLabels(c.GetLabels(), c.GetAnnotations())
|
||||
// Apply a label to distinguish sandboxes from regular containers.
|
||||
labels[containerTypeLabelKey] = containerTypeLabelSandbox
|
||||
// Apply a container name label for infra container. This is used in summary v1.
|
||||
// TODO(random-liu): Deprecate this label once container metrics is directly got from CRI.
|
||||
labels[types.KubernetesContainerNameLabel] = sandboxContainerName
|
||||
|
||||
hc := &dockercontainer.HostConfig{}
|
||||
createConfig := &dockertypes.ContainerCreateConfig{
|
||||
Name: makeSandboxName(c),
|
||||
Config: &dockercontainer.Config{
|
||||
Hostname: c.GetHostname(),
|
||||
// TODO: Handle environment variables.
|
||||
Image: image,
|
||||
Labels: labels,
|
||||
},
|
||||
HostConfig: hc,
|
||||
}
|
||||
|
||||
// Set sysctls if requested
|
||||
sysctls, err := getSysctlsFromAnnotations(c.Annotations)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get sysctls from annotations %v for sandbox %q: %v", c.Annotations, c.Metadata.GetName(), err)
|
||||
}
|
||||
hc.Sysctls = sysctls
|
||||
|
||||
// Apply linux-specific options.
|
||||
if lc := c.GetLinux(); lc != nil {
|
||||
if err := ds.applySandboxLinuxOptions(hc, lc, createConfig, image); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Set port mappings.
|
||||
exposedPorts, portBindings := makePortsAndBindings(c.GetPortMappings())
|
||||
createConfig.Config.ExposedPorts = exposedPorts
|
||||
hc.PortBindings = portBindings
|
||||
|
||||
// Set DNS options.
|
||||
if dnsConfig := c.GetDnsConfig(); dnsConfig != nil {
|
||||
hc.DNS = dnsConfig.GetServers()
|
||||
hc.DNSSearch = dnsConfig.GetSearches()
|
||||
hc.DNSOptions = dnsConfig.GetOptions()
|
||||
}
|
||||
|
||||
// Apply resource options.
|
||||
setSandboxResources(hc)
|
||||
|
||||
// Set security options.
|
||||
securityOpts, err := getSandboxSecurityOpts(c, ds.seccompProfileRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.GetName(), err)
|
||||
}
|
||||
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
|
||||
return createConfig, nil
|
||||
}
|
||||
|
||||
// sharesHostNetwork returns true if the given container is sharing the host's
|
||||
// network namespace.
|
||||
func sharesHostNetwork(container *dockertypes.ContainerJSON) bool {
|
||||
if container != nil && container.HostConfig != nil {
|
||||
return string(container.HostConfig.NetworkMode) == namespaceModeHost
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func setSandboxResources(hc *dockercontainer.HostConfig) {
|
||||
hc.Resources = dockercontainer.Resources{
|
||||
MemorySwap: -1,
|
||||
CPUShares: defaultSandboxCPUshares,
|
||||
// Use docker's default cpu quota/period.
|
||||
}
|
||||
// TODO: Get rid of the dependency on kubelet internal package.
|
||||
hc.OomScoreAdj = qos.PodInfraOOMAdj
|
||||
}
|
||||
202
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,202 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
// A helper to create a basic config.
|
||||
func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeapi.PodSandboxConfig {
|
||||
return makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid, attempt, map[string]string{}, map[string]string{})
|
||||
}
|
||||
|
||||
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeapi.PodSandboxConfig {
|
||||
return &runtimeapi.PodSandboxConfig{
|
||||
Metadata: &runtimeapi.PodSandboxMetadata{
|
||||
Name: &name,
|
||||
Namespace: &namespace,
|
||||
Uid: &uid,
|
||||
Attempt: &attempt,
|
||||
},
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}
|
||||
}
|
||||
|
||||
// TestListSandboxes creates several sandboxes and then lists them to check
|
||||
// whether the correct metadata, states, and labels are returned.
|
||||
func TestListSandboxes(t *testing.T) {
|
||||
ds, _, _ := newTestDockerService()
|
||||
name, namespace := "foo", "bar"
|
||||
configs := []*runtimeapi.PodSandboxConfig{}
|
||||
for i := 0; i < 3; i++ {
|
||||
c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i),
|
||||
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0,
|
||||
map[string]string{"label": fmt.Sprintf("foo%d", i)},
|
||||
map[string]string{"annotation": fmt.Sprintf("bar%d", i)},
|
||||
)
|
||||
configs = append(configs, c)
|
||||
}
|
||||
|
||||
expected := []*runtimeapi.PodSandbox{}
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
var createdAt int64 = 0
|
||||
for i := range configs {
|
||||
id, err := ds.RunPodSandbox(configs[i])
|
||||
assert.NoError(t, err)
|
||||
// Prepend to the expected list because ListPodSandbox returns
|
||||
// the most recent sandbox first.
|
||||
expected = append([]*runtimeapi.PodSandbox{{
|
||||
Metadata: configs[i].Metadata,
|
||||
Id: &id,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Labels: configs[i].Labels,
|
||||
Annotations: configs[i].Annotations,
|
||||
}}, expected...)
|
||||
}
|
||||
sandboxes, err := ds.ListPodSandbox(nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, sandboxes, len(expected))
|
||||
assert.Equal(t, expected, sandboxes)
|
||||
}
|
||||
|
||||
// TestSandboxStatus tests the basic lifecycle operations and verifies that
|
||||
// the status returned reflects the operations performed.
|
||||
func TestSandboxStatus(t *testing.T) {
|
||||
ds, fDocker, fClock := newTestDockerService()
|
||||
labels := map[string]string{"label": "foobar1"}
|
||||
annotations := map[string]string{"annotation": "abc"}
|
||||
config := makeSandboxConfigWithLabelsAndAnnotations("foo", "bar", "1", 0, labels, annotations)
|
||||
|
||||
// TODO: The following variables depend on the internal
|
||||
// implementation of FakeDockerClient, and should be fixed.
|
||||
fakeIP := "2.3.4.5"
|
||||
fakeNS := fmt.Sprintf("/proc/%d/ns/net", os.Getpid())
|
||||
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
ct := int64(0)
|
||||
hostNetwork := false
|
||||
expected := &runtimeapi.PodSandboxStatus{
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
Metadata: config.Metadata,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: &fakeIP},
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Network: &fakeNS, Options: &runtimeapi.NamespaceOption{HostNetwork: &hostNetwork}}},
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}
|
||||
|
||||
// Create the sandbox.
|
||||
fClock.SetTime(time.Now())
|
||||
*expected.CreatedAt = fClock.Now().UnixNano()
|
||||
id, err := ds.RunPodSandbox(config)
|
||||
|
||||
// Check internal labels
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelSandbox)
|
||||
assert.Equal(t, c.Config.Labels[types.KubernetesContainerNameLabel], sandboxContainerName)
|
||||
|
||||
expected.Id = &id // ID is only known after the creation.
|
||||
status, err := ds.PodSandboxStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
// Stop the sandbox.
|
||||
*expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
|
||||
err = ds.StopPodSandbox(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.PodSandboxStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
// Remove the container.
|
||||
err = ds.RemovePodSandbox(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.PodSandboxStatus(id)
|
||||
assert.Error(t, err, fmt.Sprintf("status of sandbox: %+v", status))
|
||||
}
|
||||
|
||||
// TestNetworkPluginInvocation checks that the right SetUpPod and TearDownPod
|
||||
// calls are made when we run/stop a sandbox.
|
||||
func TestNetworkPluginInvocation(t *testing.T) {
|
||||
ds, _, _ := newTestDockerService()
|
||||
mockPlugin := newTestNetworkPlugin(t)
|
||||
ds.networkPlugin = mockPlugin
|
||||
defer mockPlugin.Finish()
|
||||
|
||||
name := "foo0"
|
||||
ns := "bar0"
|
||||
c := makeSandboxConfigWithLabelsAndAnnotations(
|
||||
name, ns, "0", 0,
|
||||
map[string]string{"label": name},
|
||||
map[string]string{"annotation": ns},
|
||||
)
|
||||
cID := kubecontainer.ContainerID{Type: runtimeName, ID: fmt.Sprintf("/%v", makeSandboxName(c))}
|
||||
|
||||
setup := mockPlugin.EXPECT().SetUpPod(ns, name, cID)
|
||||
// StopPodSandbox performs a lookup on status to figure out if the sandbox
|
||||
// is running with host networking, since the ID is all it is given.
|
||||
mockPlugin.EXPECT().GetPodNetworkStatus(ns, name, cID)
|
||||
mockPlugin.EXPECT().TearDownPod(ns, name, cID).After(setup)
|
||||
|
||||
_, err := ds.RunPodSandbox(c)
|
||||
assert.NoError(t, err)
|
||||
err = ds.StopPodSandbox(cID.ID)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestHostNetworkPluginInvocation checks that *no* SetUp/TearDown calls happen
|
||||
// for host network sandboxes.
|
||||
func TestHostNetworkPluginInvocation(t *testing.T) {
|
||||
ds, _, _ := newTestDockerService()
|
||||
mockPlugin := newTestNetworkPlugin(t)
|
||||
ds.networkPlugin = mockPlugin
|
||||
defer mockPlugin.Finish()
|
||||
|
||||
name := "foo0"
|
||||
ns := "bar0"
|
||||
c := makeSandboxConfigWithLabelsAndAnnotations(
|
||||
name, ns, "0", 0,
|
||||
map[string]string{"label": name},
|
||||
map[string]string{"annotation": ns},
|
||||
)
|
||||
hostNetwork := true
|
||||
c.Linux = &runtimeapi.LinuxPodSandboxConfig{
|
||||
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
|
||||
NamespaceOptions: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: &hostNetwork,
|
||||
},
|
||||
},
|
||||
}
|
||||
cID := kubecontainer.ContainerID{Type: runtimeName, ID: fmt.Sprintf("/%v", makeSandboxName(c))}
|
||||
|
||||
// No calls to network plugin are expected
|
||||
_, err := ds.RunPodSandbox(c)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, ds.StopPodSandbox(cID.ID))
|
||||
}
|
||||
256
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go
generated
vendored
Normal file
|
|
@ -0,0 +1,256 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/cni"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/kubenet"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
||||
)
|
||||
|
||||
const (
|
||||
dockerRuntimeName = "docker"
|
||||
kubeAPIVersion = "0.1.0"
|
||||
|
||||
// String used to detect docker host mode for various namespaces (e.g.
|
||||
// networking). Must match the value returned by docker inspect -f
|
||||
// '{{.HostConfig.NetworkMode}}'.
|
||||
namespaceModeHost = "host"
|
||||
|
||||
dockerNetNSFmt = "/proc/%v/ns/net"
|
||||
|
||||
defaultSeccompProfile = "unconfined"
|
||||
|
||||
// Internal docker labels used to identify whether a container is a sandbox
|
||||
// or a regular container.
|
||||
// TODO: This is not backward compatible with older containers. We will
|
||||
// need to add filtering based on names.
|
||||
containerTypeLabelKey = "io.kubernetes.docker.type"
|
||||
containerTypeLabelSandbox = "podsandbox"
|
||||
containerTypeLabelContainer = "container"
|
||||
containerLogPathLabelKey = "io.kubernetes.container.logpath"
|
||||
sandboxIDLabelKey = "io.kubernetes.sandbox.id"
|
||||
|
||||
// TODO: https://github.com/kubernetes/kubernetes/pull/31169 provides experimental
|
||||
// defaulting of host user namespace that may be enabled when the docker daemon
|
||||
// is using remapped UIDs.
|
||||
// Dockershim should provide detection support for a remapping environment.
|
||||
// This should be included in the feature proposal. Defaulting may still occur according
|
||||
// to kubelet behavior and system settings in addition to any API flags that may be introduced.
|
||||
)
|
||||
|
||||
// NetworkPluginSettings is the subset of kubelet runtime args we pass
|
||||
// to the container runtime shim so it can probe for network plugins.
|
||||
// In the future we will feed these directly to a standalone container
|
||||
// runtime process.
|
||||
type NetworkPluginSettings struct {
|
||||
// HairpinMode is best described by comments surrounding the kubelet arg
|
||||
HairpinMode componentconfig.HairpinMode
|
||||
// NonMasqueradeCIDR is the range of ips which should *not* be included
|
||||
// in any MASQUERADE rules applied by the plugin
|
||||
NonMasqueradeCIDR string
|
||||
// PluginName is the name of the plugin the runtime shim probes for
|
||||
PluginName string
|
||||
// PluginBinDir is the directory in which the binaries for the plugin with
|
||||
// PluginName are kept. The admin is responsible for provisioning these
|
||||
// binaries before-hand.
|
||||
PluginBinDir string
|
||||
// PluginConfDir is the directory in which the admin places a CNI conf.
|
||||
// Depending on the plugin, this may be an optional field, eg: kubenet
|
||||
// generates its own plugin conf.
|
||||
PluginConfDir string
|
||||
// MTU is the desired MTU for network devices created by the plugin.
|
||||
MTU int
|
||||
|
||||
// RuntimeHost is an interface that serves as a trap-door from plugin back
|
||||
// into the kubelet.
|
||||
// TODO: This shouldn't be required, remove once we move host ports into CNI
|
||||
// and figure out bandwidth shaping. See corresponding comments above
|
||||
// network.Host interface.
|
||||
LegacyRuntimeHost network.LegacyHost
|
||||
}
|
||||
|
||||
var internalLabelKeys []string = []string{containerTypeLabelKey, containerLogPathLabelKey, sandboxIDLabelKey}
|
||||
|
||||
// NOTE: Anything passed to DockerService should be eventually handled in another way when we switch to running the shim as a different process.
|
||||
func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot string, podSandboxImage string, streamingConfig *streaming.Config, pluginSettings *NetworkPluginSettings, cgroupsName string) (DockerService, error) {
|
||||
c := dockertools.NewInstrumentedDockerInterface(client)
|
||||
ds := &dockerService{
|
||||
seccompProfileRoot: seccompProfileRoot,
|
||||
client: c,
|
||||
os: kubecontainer.RealOS{},
|
||||
podSandboxImage: podSandboxImage,
|
||||
streamingRuntime: &streamingRuntime{
|
||||
client: client,
|
||||
// Only the native exec handling is supported for now.
|
||||
// TODO(#35747) - Either deprecate nsenter exec handling, or add support for it here.
|
||||
execHandler: &dockertools.NativeExecHandler{},
|
||||
},
|
||||
containerManager: cm.NewContainerManager(cgroupsName, client),
|
||||
}
|
||||
if streamingConfig != nil {
|
||||
var err error
|
||||
ds.streamingServer, err = streaming.NewServer(*streamingConfig, ds.streamingRuntime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// dockershim currently only supports CNI plugins.
|
||||
cniPlugins := cni.ProbeNetworkPlugins(pluginSettings.PluginConfDir, pluginSettings.PluginBinDir)
|
||||
cniPlugins = append(cniPlugins, kubenet.NewPlugin(pluginSettings.PluginBinDir))
|
||||
netHost := &dockerNetworkHost{
|
||||
pluginSettings.LegacyRuntimeHost,
|
||||
&namespaceGetter{ds},
|
||||
}
|
||||
plug, err := network.InitNetworkPlugin(cniPlugins, pluginSettings.PluginName, netHost, pluginSettings.HairpinMode, pluginSettings.NonMasqueradeCIDR, pluginSettings.MTU)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err)
|
||||
}
|
||||
ds.networkPlugin = plug
|
||||
glog.Infof("Docker cri networking managed by %v", plug.Name())
|
||||
return ds, nil
|
||||
}
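// Illustrative sketch (editor's addition, not part of the original commit):
// minimal wiring of NewDockerService with streaming disabled (nil config).
// The seccomp root and cgroups name below are assumptions, not values taken
// from this commit; pluginSettings must at least carry the CNI conf/bin dirs.
func exampleNewDockerService(client dockertools.DockerInterface, pluginSettings *NetworkPluginSettings) (DockerService, error) {
	return NewDockerService(client, "/var/lib/kubelet/seccomp", defaultSandboxImage, nil, pluginSettings, "/docker-daemon")
}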
|
||||
|
||||
// DockerService is an interface that embeds the new RuntimeService and
|
||||
// ImageService interfaces.
|
||||
type DockerService interface {
|
||||
internalapi.RuntimeService
|
||||
internalapi.ImageManagerService
|
||||
Start() error
|
||||
// For serving streaming calls.
|
||||
http.Handler
|
||||
}
|
||||
|
||||
type dockerService struct {
|
||||
seccompProfileRoot string
|
||||
client dockertools.DockerInterface
|
||||
os kubecontainer.OSInterface
|
||||
podSandboxImage string
|
||||
streamingRuntime *streamingRuntime
|
||||
streamingServer streaming.Server
|
||||
networkPlugin network.NetworkPlugin
|
||||
containerManager cm.ContainerManager
|
||||
}
|
||||
|
||||
// Version returns the runtime name, runtime version and runtime API version
|
||||
func (ds *dockerService) Version(_ string) (*runtimeapi.VersionResponse, error) {
|
||||
v, err := ds.client.Version()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
|
||||
}
|
||||
runtimeAPIVersion := kubeAPIVersion
|
||||
name := dockerRuntimeName
|
||||
// Docker API version (e.g., 1.23) is not semver compatible. Add a ".0"
|
||||
// suffix to remedy this.
|
||||
apiVersion := fmt.Sprintf("%s.0", v.APIVersion)
|
||||
return &runtimeapi.VersionResponse{
|
||||
Version: &runtimeAPIVersion,
|
||||
RuntimeName: &name,
|
||||
RuntimeVersion: &v.Version,
|
||||
RuntimeApiVersion: &apiVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
|
||||
func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
|
||||
if runtimeConfig == nil {
|
||||
return
|
||||
}
|
||||
glog.Infof("docker cri received runtime config %+v", runtimeConfig)
|
||||
if ds.networkPlugin != nil && runtimeConfig.NetworkConfig.PodCidr != nil {
|
||||
event := make(map[string]interface{})
|
||||
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = *runtimeConfig.NetworkConfig.PodCidr
|
||||
ds.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// namespaceGetter is a wrapper around the dockerService that implements
|
||||
// the network.NamespaceGetter interface.
|
||||
type namespaceGetter struct {
|
||||
*dockerService
|
||||
}
|
||||
|
||||
// GetNetNS returns the network namespace of the given containerID. The ID
|
||||
// supplied is typically the ID of a pod sandbox. This getter doesn't try
|
||||
// to map non-sandbox IDs to their respective sandboxes.
|
||||
func (ds *dockerService) GetNetNS(podSandboxID string) (string, error) {
|
||||
r, err := ds.client.InspectContainer(podSandboxID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return getNetworkNamespace(r), nil
|
||||
}
|
||||
|
||||
// dockerNetworkHost implements network.Host by wrapping the legacy host
|
||||
// passed in by the kubelet and adding NamespaceGetter methods. The legacy
|
||||
// host methods are slated for deletion.
|
||||
type dockerNetworkHost struct {
|
||||
network.LegacyHost
|
||||
*namespaceGetter
|
||||
}
|
||||
|
||||
// Start initializes and starts components in dockerService.
|
||||
func (ds *dockerService) Start() error {
|
||||
return ds.containerManager.Start()
|
||||
}
|
||||
|
||||
// Status returns the status of the runtime.
|
||||
// TODO(random-liu): Set network condition accordingly here.
|
||||
func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
|
||||
runtimeReady := &runtimeapi.RuntimeCondition{
|
||||
Type: proto.String(runtimeapi.RuntimeReady),
|
||||
Status: proto.Bool(true),
|
||||
}
|
||||
networkReady := &runtimeapi.RuntimeCondition{
|
||||
Type: proto.String(runtimeapi.NetworkReady),
|
||||
Status: proto.Bool(true),
|
||||
}
|
||||
conditions := []*runtimeapi.RuntimeCondition{runtimeReady, networkReady}
|
||||
if _, err := ds.client.Version(); err != nil {
|
||||
runtimeReady.Status = proto.Bool(false)
|
||||
runtimeReady.Reason = proto.String("DockerDaemonNotReady")
|
||||
runtimeReady.Message = proto.String(fmt.Sprintf("docker: failed to get docker version: %v", err))
|
||||
}
|
||||
if err := ds.networkPlugin.Status(); err != nil {
|
||||
networkReady.Status = proto.Bool(false)
|
||||
networkReady.Reason = proto.String("NetworkPluginNotReady")
|
||||
networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err))
|
||||
}
|
||||
return &runtimeapi.RuntimeStatus{Conditions: conditions}, nil
|
||||
}
|
||||
|
||||
func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if ds.streamingServer != nil {
|
||||
ds.streamingServer.ServeHTTP(w, r)
|
||||
} else {
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}
|
||||
91
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/mock_network"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
)
|
||||
|
||||
// newTestNetworkPlugin returns a mock plugin that implements network.NetworkPlugin
|
||||
func newTestNetworkPlugin(t *testing.T) *mock_network.MockNetworkPlugin {
|
||||
ctrl := gomock.NewController(t)
|
||||
return mock_network.NewMockNetworkPlugin(ctrl)
|
||||
}
|
||||
|
||||
func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clock.FakeClock) {
|
||||
fakeClock := clock.NewFakeClock(time.Time{})
|
||||
c := dockertools.NewFakeDockerClientWithClock(fakeClock)
|
||||
return &dockerService{client: c, os: &containertest.FakeOS{}, networkPlugin: &network.NoopNetworkPlugin{}}, c, fakeClock
|
||||
}
|
||||
|
||||
// TestStatus tests the runtime status logic.
|
||||
func TestStatus(t *testing.T) {
|
||||
ds, fDocker, _ := newTestDockerService()
|
||||
|
||||
assertStatus := func(expected map[string]bool, status *runtimeapi.RuntimeStatus) {
|
||||
conditions := status.GetConditions()
|
||||
assert.Equal(t, len(expected), len(conditions))
|
||||
for k, v := range expected {
|
||||
for _, c := range conditions {
|
||||
if k == c.GetType() {
|
||||
assert.Equal(t, v, c.GetStatus())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Should report ready status if version returns no error.
|
||||
status, err := ds.Status()
|
||||
assert.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: true,
|
||||
runtimeapi.NetworkReady: true,
|
||||
}, status)
|
||||
|
||||
// Should not report ready status if version returns an error.
|
||||
fDocker.InjectError("version", errors.New("test error"))
|
||||
status, err = ds.Status()
|
||||
assert.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: false,
|
||||
runtimeapi.NetworkReady: true,
|
||||
}, status)
|
||||
|
||||
// Should not report ready status if the network plugin returns an error.
|
||||
mockPlugin := newTestNetworkPlugin(t)
|
||||
ds.networkPlugin = mockPlugin
|
||||
defer mockPlugin.Finish()
|
||||
mockPlugin.EXPECT().Status().Return(errors.New("network error"))
|
||||
status, err = ds.Status()
|
||||
assert.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: true,
|
||||
runtimeapi.NetworkReady: false,
|
||||
}, status)
|
||||
}
|
||||
130
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go
generated
vendored
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
|
||||
"k8s.io/kubernetes/pkg/util/term"
|
||||
)
|
||||
|
||||
type streamingRuntime struct {
|
||||
client dockertools.DockerInterface
|
||||
execHandler dockertools.ExecHandler
|
||||
}
|
||||
|
||||
var _ streaming.Runtime = &streamingRuntime{}
|
||||
|
||||
func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error {
|
||||
return r.exec(containerID, cmd, in, out, err, tty, resize, 0)
|
||||
}
|
||||
|
||||
// Internal version of Exec adds a timeout.
|
||||
func (r *streamingRuntime) exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
|
||||
container, err := checkContainerStatus(r.client, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.execHandler.ExecInContainer(r.client, container, cmd, in, out, errw, tty, resize, timeout)
|
||||
}
|
||||
|
||||
func (r *streamingRuntime) Attach(containerID string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan term.Size) error {
|
||||
_, err := checkContainerStatus(r.client, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dockertools.AttachContainer(r.client, containerID, in, out, errw, tty, resize)
|
||||
}
|
||||
|
||||
func (r *streamingRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
if port < 0 || port > math.MaxUint16 {
|
||||
return fmt.Errorf("invalid port %d", port)
|
||||
}
|
||||
return dockertools.PortForward(r.client, podSandboxID, uint16(port), stream)
|
||||
}
|
||||
|
||||
// ExecSync executes a command in the container, and returns the stdout output.
|
||||
// If command exits with a non-zero exit code, an error is returned.
|
||||
func (ds *dockerService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
|
||||
var stdoutBuffer, stderrBuffer bytes.Buffer
|
||||
err = ds.streamingRuntime.exec(containerID, cmd,
|
||||
nil, // in
|
||||
ioutils.WriteCloserWrapper(&stdoutBuffer),
|
||||
ioutils.WriteCloserWrapper(&stderrBuffer),
|
||||
false, // tty
|
||||
nil, // resize
|
||||
timeout)
|
||||
return stdoutBuffer.Bytes(), stderrBuffer.Bytes(), err
|
||||
}
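// Illustrative sketch (editor's addition, not part of the original commit):
// running a short command synchronously in a container; the container ID and
// command are made up.
func exampleExecSync(ds *dockerService) ([]byte, []byte, error) {
	return ds.ExecSync("abc123", []string{"cat", "/etc/hostname"}, 10*time.Second)
}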
|
||||
|
||||
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
|
||||
func (ds *dockerService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("exec")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetContainerId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ds.streamingServer.GetExec(req)
|
||||
}
|
||||
|
||||
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
|
||||
func (ds *dockerService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("attach")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetContainerId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ds.streamingServer.GetAttach(req)
|
||||
}
|
||||
|
||||
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
|
||||
func (ds *dockerService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("port forward")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetPodSandboxId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(timstclair): Verify that ports are exposed.
|
||||
return ds.streamingServer.GetPortForward(req)
|
||||
}
|
||||
|
||||
func checkContainerStatus(client dockertools.DockerInterface, containerID string) (*dockertypes.ContainerJSON, error) {
|
||||
container, err := client.InspectContainer(containerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !container.State.Running {
|
||||
return nil, fmt.Errorf("container not running (%s)", container.ID)
|
||||
}
|
||||
return container, nil
|
||||
}
|
||||
316
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go
generated
vendored
Normal file
|
|
@ -0,0 +1,316 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockerfilters "github.com/docker/engine-api/types/filters"
|
||||
dockerapiversion "github.com/docker/engine-api/types/versions"
|
||||
dockernat "github.com/docker/go-connections/nat"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
const (
|
||||
annotationPrefix = "annotation."
|
||||
)
|
||||
|
||||
var (
|
||||
conflictRE = regexp.MustCompile(`Conflict. (?:.)+ is already in use by container ([0-9a-z]+)`)
|
||||
)
|
||||
|
||||
// apiVersion implements kubecontainer.Version interface by implementing
|
||||
// Compare() and String(). It uses the compare function of engine-api to
|
||||
// compare docker apiversions.
|
||||
type apiVersion string
|
||||
|
||||
func (v apiVersion) String() string {
|
||||
return string(v)
|
||||
}
|
||||
|
||||
func (v apiVersion) Compare(other string) (int, error) {
|
||||
if dockerapiversion.LessThan(string(v), other) {
|
||||
return -1, nil
|
||||
} else if dockerapiversion.GreaterThan(string(v), other) {
|
||||
return 1, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
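// Illustrative sketch (editor's addition, not part of the original commit):
// how the apiVersion wrapper compares two docker API versions.
func exampleAPIVersionCompare() bool {
	v := apiVersion("1.23")
	c, err := v.Compare("1.24")
	// "1.23" is less than "1.24", so c is -1 here.
	return err == nil && c < 0
}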
|
||||
|
||||
// generateEnvList converts KeyValue list to a list of strings, in the form of
|
||||
// '<key>=<value>', which can be understood by docker.
|
||||
func generateEnvList(envs []*runtimeapi.KeyValue) (result []string) {
|
||||
for _, env := range envs {
|
||||
result = append(result, fmt.Sprintf("%s=%s", env.GetKey(), env.GetValue()))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// makeLabels converts annotations to labels and merge them with the given
|
||||
// labels. This is necessary because docker does not support annotations;
|
||||
// we *fake* annotations using labels. Note that docker labels are not
|
||||
// updatable.
|
||||
func makeLabels(labels, annotations map[string]string) map[string]string {
|
||||
merged := make(map[string]string)
|
||||
for k, v := range labels {
|
||||
merged[k] = v
|
||||
}
|
||||
for k, v := range annotations {
|
||||
// Assume there won't be conflict.
|
||||
merged[fmt.Sprintf("%s%s", annotationPrefix, k)] = v
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
// extractLabels converts raw docker labels to the CRI labels and annotations.
|
||||
// It also filters out internal labels used by this shim.
|
||||
func extractLabels(input map[string]string) (map[string]string, map[string]string) {
|
||||
labels := make(map[string]string)
|
||||
annotations := make(map[string]string)
|
||||
for k, v := range input {
|
||||
// Check if the key is used internally by the shim.
|
||||
internal := false
|
||||
for _, internalKey := range internalLabelKeys {
|
||||
if k == internalKey {
|
||||
internal = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if internal {
|
||||
continue
|
||||
}
|
||||
|
||||
// Delete the container name label for the sandbox. It is added in the shim,
|
||||
// should not be exposed via CRI.
|
||||
if k == types.KubernetesContainerNameLabel &&
|
||||
input[containerTypeLabelKey] == containerTypeLabelSandbox {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the label should be treated as an annotation.
|
||||
if strings.HasPrefix(k, annotationPrefix) {
|
||||
annotations[strings.TrimPrefix(k, annotationPrefix)] = v
|
||||
continue
|
||||
}
|
||||
labels[k] = v
|
||||
}
|
||||
return labels, annotations
|
||||
}
|
||||
|
||||
// generateMountBindings converts the mount list to a list of strings that
|
||||
// can be understood by docker.
|
||||
// Each element in the string is in the form of:
|
||||
// '<HostPath>:<ContainerPath>', or
|
||||
// '<HostPath>:<ContainerPath>:ro', if the path is read only, or
|
||||
// '<HostPath>:<ContainerPath>:Z', if the volume requires SELinux
|
||||
// relabeling and the pod provides an SELinux label
|
||||
func generateMountBindings(mounts []*runtimeapi.Mount) (result []string) {
|
||||
for _, m := range mounts {
|
||||
bind := fmt.Sprintf("%s:%s", m.GetHostPath(), m.GetContainerPath())
|
||||
readOnly := m.GetReadonly()
|
||||
if readOnly {
|
||||
bind += ":ro"
|
||||
}
|
||||
// Only request relabeling if the pod provides an SELinux context. If the pod
|
||||
// does not provide an SELinux context relabeling will label the volume with
|
||||
// the container's randomly allocated MCS label. This would restrict access
|
||||
// to the volume to the container which mounts it first.
|
||||
if m.GetSelinuxRelabel() {
|
||||
if readOnly {
|
||||
bind += ",Z"
|
||||
} else {
|
||||
bind += ":Z"
|
||||
}
|
||||
}
|
||||
result = append(result, bind)
|
||||
}
|
||||
return
|
||||
}
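// Illustrative sketch (editor's addition, not part of the original commit): a
// read-only mount that also requests SELinux relabeling yields a
// "<host>:<container>:ro,Z" bind string. The struct field names are inferred
// from the getters used above and the paths are made up.
func exampleGenerateMountBindings() []string {
	hostPath, containerPath := "/var/lib/foo", "/data"
	readOnly, relabel := true, true
	mounts := []*runtimeapi.Mount{{
		HostPath:       &hostPath,
		ContainerPath:  &containerPath,
		Readonly:       &readOnly,
		SelinuxRelabel: &relabel,
	}}
	// Expected result: []string{"/var/lib/foo:/data:ro,Z"}
	return generateMountBindings(mounts)
}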
|
||||
|
||||
func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
|
||||
exposedPorts := map[dockernat.Port]struct{}{}
|
||||
portBindings := map[dockernat.Port][]dockernat.PortBinding{}
|
||||
for _, port := range pm {
|
||||
exteriorPort := port.GetHostPort()
|
||||
if exteriorPort == 0 {
|
||||
// No need to do port binding when HostPort is not specified
|
||||
continue
|
||||
}
|
||||
interiorPort := port.GetContainerPort()
|
||||
// Some of this port stuff is under-documented voodoo.
|
||||
// See http://stackoverflow.com/questions/20428302/binding-a-port-to-a-host-interface-using-the-rest-api
|
||||
var protocol string
|
||||
switch strings.ToUpper(string(port.GetProtocol())) {
|
||||
case "UDP":
|
||||
protocol = "/udp"
|
||||
case "TCP":
|
||||
protocol = "/tcp"
|
||||
default:
|
||||
glog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol)
|
||||
protocol = "/tcp"
|
||||
}
|
||||
|
||||
dockerPort := dockernat.Port(strconv.Itoa(int(interiorPort)) + protocol)
|
||||
exposedPorts[dockerPort] = struct{}{}
|
||||
|
||||
hostBinding := dockernat.PortBinding{
|
||||
HostPort: strconv.Itoa(int(exteriorPort)),
|
||||
HostIP: port.GetHostIp(),
|
||||
}
|
||||
|
||||
// Allow multiple host ports bind to same docker port
|
||||
if existedBindings, ok := portBindings[dockerPort]; ok {
|
||||
// If a docker port already maps to a host port, just append the host port
|
||||
portBindings[dockerPort] = append(existedBindings, hostBinding)
|
||||
} else {
|
||||
// Otherwise, it's a new port binding
|
||||
portBindings[dockerPort] = []dockernat.PortBinding{
|
||||
hostBinding,
|
||||
}
|
||||
}
|
||||
}
|
||||
return exposedPorts, portBindings
|
||||
}
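// Illustrative sketch (editor's addition, not part of the original commit):
// mapping a single CRI port mapping to docker's exposed-port and port-binding
// structures. Protocol is left unset here, so the function falls back to TCP;
// the field names are inferred from the getters used above and the port
// numbers are made up.
func examplePortsAndBindings() (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
	containerPort, hostPort := int32(80), int32(8080)
	hostIP := "127.0.0.1"
	pm := []*runtimeapi.PortMapping{{
		ContainerPort: &containerPort,
		HostPort:      &hostPort,
		HostIp:        &hostIP,
	}}
	// Expected: exposedPorts contains "80/tcp"; portBindings maps "80/tcp"
	// to a binding with HostPort "8080" and HostIP "127.0.0.1".
	return makePortsAndBindings(pm)
}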
|
||||
|
||||
// getContainerSecurityOpts gets container security options from container and sandbox config, currently from sandbox
|
||||
// annotations.
|
||||
// It is an experimental feature and may be promoted to official runtime api in the future.
|
||||
func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
|
||||
appArmorOpts, err := dockertools.GetAppArmorOpts(sandboxConfig.GetAnnotations(), containerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seccompOpts, err := dockertools.GetSeccompOpts(sandboxConfig.GetAnnotations(), containerName, seccompProfileRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
securityOpts := append(appArmorOpts, seccompOpts...)
|
||||
var opts []string
|
||||
for _, securityOpt := range securityOpts {
|
||||
k, v := securityOpt.GetKV()
|
||||
opts = append(opts, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func getSandboxSecurityOpts(sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
|
||||
// sandboxContainerName doesn't exist in the pod, so pod security options will be returned by default.
|
||||
return getContainerSecurityOpts(sandboxContainerName, sandboxConfig, seccompProfileRoot)
|
||||
}
|
||||
|
||||
func getNetworkNamespace(c *dockertypes.ContainerJSON) string {
|
||||
if c.State.Pid == 0 {
|
||||
// Docker reports pid 0 for an exited container. We can't use it to
|
||||
// check the network namespace, so return an empty string instead.
|
||||
glog.V(4).Infof("Cannot find network namespace for the terminated container %q", c.ID)
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(dockerNetNSFmt, c.State.Pid)
|
||||
}
|
||||
|
||||
// getSysctlsFromAnnotations gets sysctls from annotations.
|
||||
func getSysctlsFromAnnotations(annotations map[string]string) (map[string]string, error) {
|
||||
var results map[string]string
|
||||
|
||||
sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(sysctls)+len(unsafeSysctls) > 0 {
|
||||
results = make(map[string]string, len(sysctls)+len(unsafeSysctls))
|
||||
for _, c := range sysctls {
|
||||
results[c.Name] = c.Value
|
||||
}
|
||||
for _, c := range unsafeSysctls {
|
||||
results[c.Name] = c.Value
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
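// Illustrative sketch (editor's addition, not part of the original commit):
// sysctl annotations use the comma-separated "key=value" form parsed by
// v1.SysctlsFromPodAnnotations; the values mirror the test fixtures at the
// end of this commit.
func exampleSysctlsFromAnnotations() (map[string]string, error) {
	annotations := map[string]string{
		v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
	}
	// Expected: {"kernel.shmmni": "32768", "kernel.shmmax": "1000000000"}
	return getSysctlsFromAnnotations(annotations)
}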
|
||||
|
||||
// dockerFilter wraps around dockerfilters.Args and provides methods to modify
|
||||
// the filter easily.
|
||||
type dockerFilter struct {
|
||||
args *dockerfilters.Args
|
||||
}
|
||||
|
||||
func newDockerFilter(args *dockerfilters.Args) *dockerFilter {
|
||||
return &dockerFilter{args: args}
|
||||
}
|
||||
|
||||
func (f *dockerFilter) Add(key, value string) {
|
||||
f.args.Add(key, value)
|
||||
}
|
||||
|
||||
func (f *dockerFilter) AddLabel(key, value string) {
|
||||
f.Add("label", fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
|
||||
// getUserFromImageUser gets uid or user name of the image user.
|
||||
// If user is numeric, it will be treated as a uid; otherwise, it is treated as a user name.
|
||||
func getUserFromImageUser(imageUser string) (*int64, *string) {
|
||||
user := dockertools.GetUserFromImageUser(imageUser)
|
||||
// Return nil for both if the user is not specified in the image.
|
||||
if user == "" {
|
||||
return nil, nil
|
||||
}
|
||||
// user could be either uid or user name. Try to interpret as numeric uid.
|
||||
uid, err := strconv.ParseInt(user, 10, 64)
|
||||
if err != nil {
|
||||
// If user is non-numeric, assume it's a user name.
|
||||
return nil, &user
|
||||
}
|
||||
// If user is a numeric uid.
|
||||
return &uid, nil
|
||||
}
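// Illustrative sketch (editor's addition, not part of the original commit):
// the two possible outcomes of getUserFromImageUser for a numeric and a
// non-numeric image USER value.
func exampleGetUserFromImageUser() (*int64, *string) {
	uid, _ := getUserFromImageUser("1000")    // uid set to 1000, name nil
	_, name := getUserFromImageUser("nobody") // uid nil, name set to "nobody"
	return uid, name
}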
|
||||
|
||||
// See #33189. If the previous attempt to create a sandbox container named FOO
|
||||
// failed due to "device or resource busy", it is possible that docker did
|
||||
// not clean up properly and has inconsistent internal state. Docker would
|
||||
// not report the existence of FOO, but would complain if user wants to
|
||||
// create a new container named FOO. To work around this, we parse the error
|
||||
// message to identify failure caused by naming conflict, and try to remove
|
||||
// the old container FOO.
|
||||
// TODO(#33189): Monitor the tests to see if the fix is sufficient.
|
||||
func recoverFromConflictIfNeeded(client dockertools.DockerInterface, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
matches := conflictRE.FindStringSubmatch(err.Error())
|
||||
if len(matches) != 2 {
|
||||
return
|
||||
}
|
||||
|
||||
id := matches[1]
|
||||
glog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id)
|
||||
if err := client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
|
||||
glog.Errorf("Failed to remove the conflicting sandbox container: %v", err)
|
||||
} else {
|
||||
glog.V(2).Infof("Successfully removed conflicting sandbox %q", id)
|
||||
}
|
||||
}
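// Illustrative sketch (editor's addition, not part of the original commit):
// the shape of docker naming-conflict error that conflictRE is written
// against; the container ID "abc123" is made up.
func exampleRecoverFromConflict(client dockertools.DockerInterface) {
	err := fmt.Errorf(`Conflict. The name "/k8s_POD_foo" is already in use by container abc123. You have to remove (or rename) that container to be able to reuse that name.`)
	// This parses "abc123" out of the message and attempts to remove that container.
	recoverFromConflictIfNeeded(client, err)
}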
|
||||
239
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,239 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
)
|
||||
|
||||
func TestLabelsAndAnnotationsRoundTrip(t *testing.T) {
|
||||
expectedLabels := map[string]string{"foo.123.abc": "baz", "bar.456.xyz": "qwe"}
|
||||
expectedAnnotations := map[string]string{"uio.ert": "dfs", "jkl": "asd"}
|
||||
// Merge labels and annotations into docker labels.
|
||||
dockerLabels := makeLabels(expectedLabels, expectedAnnotations)
|
||||
// Extract labels and annotations from docker labels.
|
||||
actualLabels, actualAnnotations := extractLabels(dockerLabels)
|
||||
assert.Equal(t, expectedLabels, actualLabels)
|
||||
assert.Equal(t, expectedAnnotations, actualAnnotations)
|
||||
}
|
||||
|
||||
// TestGetContainerSecurityOpts tests the logic of generating container security options from sandbox annotations.
|
||||
// The actual profile loading logic is tested in dockertools.
|
||||
// TODO: Migrate the corresponding test to dockershim.
|
||||
func TestGetContainerSecurityOpts(t *testing.T) {
|
||||
containerName := "bar"
|
||||
makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
|
||||
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
msg string
|
||||
config *runtimeapi.PodSandboxConfig
|
||||
expectedOpts []string
|
||||
}{{
|
||||
msg: "No security annotations",
|
||||
config: makeConfig(nil),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "Seccomp unconfined",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + containerName: "unconfined",
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "Seccomp default",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
|
||||
}),
|
||||
expectedOpts: nil,
|
||||
}, {
|
||||
msg: "Seccomp pod default",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
}),
|
||||
expectedOpts: nil,
|
||||
}, {
|
||||
msg: "AppArmor runtime/default",
|
||||
config: makeConfig(map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileRuntimeDefault,
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "AppArmor local profile",
|
||||
config: makeConfig(map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined", "apparmor=foo"},
|
||||
}, {
|
||||
msg: "AppArmor and seccomp profile",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
|
||||
}),
|
||||
expectedOpts: []string{"apparmor=foo"},
|
||||
}}
|
||||
|
||||
for i, test := range tests {
|
||||
opts, err := getContainerSecurityOpts(containerName, test.config, "test/seccomp/profile/root")
|
||||
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
|
||||
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
|
||||
for _, opt := range test.expectedOpts {
|
||||
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetSandboxSecurityOpts tests the logic of generating sandbox security options from sandbox annotations.
|
||||
func TestGetSandboxSecurityOpts(t *testing.T) {
|
||||
makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
|
||||
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
msg string
|
||||
config *runtimeapi.PodSandboxConfig
|
||||
expectedOpts []string
|
||||
}{{
|
||||
msg: "No security annotations",
|
||||
config: makeConfig(nil),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "Seccomp default",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
}),
|
||||
expectedOpts: nil,
|
||||
}, {
|
||||
msg: "Seccomp unconfined",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "unconfined",
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "Seccomp pod and container profile",
|
||||
config: makeConfig(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + "test-container": "unconfined",
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
}),
|
||||
expectedOpts: nil,
|
||||
}}
|
||||
|
||||
for i, test := range tests {
|
||||
opts, err := getSandboxSecurityOpts(test.config, "test/seccomp/profile/root")
|
||||
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
|
||||
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
|
||||
for _, opt := range test.expectedOpts {
|
||||
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetSysctlsFromAnnotations tests the logic of getting sysctls from annotations.
|
||||
func TestGetSysctlsFromAnnotations(t *testing.T) {
|
||||
tests := []struct {
|
||||
annotations map[string]string
|
||||
expectedSysctls map[string]string
|
||||
}{{
|
||||
annotations: map[string]string{
|
||||
v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
|
||||
v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
|
||||
},
|
||||
expectedSysctls: map[string]string{
|
||||
"kernel.shmmni": "32768",
|
||||
"kernel.shmmax": "1000000000",
|
||||
"knet.ipv4.route.min_pmtu": "1000",
|
||||
},
|
||||
}, {
|
||||
annotations: map[string]string{
|
||||
v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
|
||||
},
|
||||
expectedSysctls: map[string]string{
|
||||
"kernel.shmmni": "32768",
|
||||
"kernel.shmmax": "1000000000",
|
||||
},
|
||||
}, {
|
||||
annotations: map[string]string{
|
||||
v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
|
||||
},
|
||||
expectedSysctls: map[string]string{
|
||||
"knet.ipv4.route.min_pmtu": "1000",
|
||||
},
|
||||
}}
|
||||
|
||||
for i, test := range tests {
|
||||
actual, err := getSysctlsFromAnnotations(test.annotations)
|
||||
assert.NoError(t, err, "TestCase[%d]", i)
|
||||
assert.Len(t, actual, len(test.expectedSysctls), "TestCase[%d]", i)
|
||||
assert.Equal(t, test.expectedSysctls, actual, "TestCase[%d]", i)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetUserFromImageUser tests the logic of getting image uid or user name of image user.
|
||||
func TestGetUserFromImageUser(t *testing.T) {
|
||||
newI64 := func(i int64) *int64 { return &i }
|
||||
newStr := func(s string) *string { return &s }
|
||||
for c, test := range map[string]struct {
|
||||
user string
|
||||
uid *int64
|
||||
name *string
|
||||
}{
|
||||
"no gid": {
|
||||
user: "0",
|
||||
uid: newI64(0),
|
||||
},
|
||||
"uid/gid": {
|
||||
user: "0:1",
|
||||
uid: newI64(0),
|
||||
},
|
||||
"empty user": {
|
||||
user: "",
|
||||
},
|
||||
"multiple spearators": {
|
||||
user: "1:2:3",
|
||||
uid: newI64(1),
|
||||
},
|
||||
"root username": {
|
||||
user: "root:root",
|
||||
name: newStr("root"),
|
||||
},
|
||||
"username": {
|
||||
user: "test:test",
|
||||
name: newStr("test"),
|
||||
},
|
||||
} {
|
||||
t.Logf("TestCase - %q", c)
|
||||
actualUID, actualName := getUserFromImageUser(test.user)
|
||||
assert.Equal(t, test.uid, actualUID)
|
||||
assert.Equal(t, test.name, actualName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsingCreationConflictError(t *testing.T) {
|
||||
// Expected error message from docker.
|
||||
msg := "Conflict. The name \"/k8s_POD_pfpod_e2e-tests-port-forwarding-dlxt2_81a3469e-99e1-11e6-89f2-42010af00002_0\" is already in use by container 24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e. You have to remove (or rename) that container to be able to reuse that name."
|
||||
|
||||
matches := conflictRE.FindStringSubmatch(msg)
|
||||
require.Len(t, matches, 2)
|
||||
require.Equal(t, matches[1], "24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e")
|
||||
}
|
||||
137 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming.go generated vendored Normal file
|
|
@@ -0,0 +1,137 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
)
|
||||
|
||||
// Container "names" are implementation details that do not concern
|
||||
// kubelet/CRI. This CRI shim uses names to fulfill the CRI requirement to
|
||||
// make sandbox/container creation idempotent. CRI states that there can
|
||||
// only exist one sandbox/container with the given metadata. To enforce this,
|
||||
// this shim constructs a name using the fields in the metadata so that
|
||||
// docker will reject the creation request if the name already exists.
|
||||
//
|
||||
// Note that changes to naming will likely break backward compatibility.
|
||||
// Code must be added to ensure the shim knows how to recognize and extract
|
||||
// information from the older containers.
|
||||
//
|
||||
// TODO: Add code to handle backward compatibility, i.e., making sure we can
|
||||
// recognize older containers and extract information from their names if
|
||||
// necessary.
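//
// For example, a sandbox for pod "nginx" in namespace "default" with UID
// "abc123" and attempt 0 is named "k8s_POD_nginx_default_abc123_0"; see
// makeSandboxName below and the round-trip cases in naming_test.go.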
|
||||
|
||||
const (
|
||||
// kubePrefix is used to identify the containers/sandboxes on the node managed by kubelet
|
||||
kubePrefix = "k8s"
|
||||
// sandboxContainerName is a string to include in the docker container name so
|
||||
// that users can easily identify the sandboxes.
|
||||
sandboxContainerName = leaky.PodInfraContainerName
|
||||
// Delimiter used to construct docker container names.
|
||||
nameDelimiter = "_"
|
||||
// DockerImageIDPrefix is the prefix of image id in container status.
|
||||
DockerImageIDPrefix = dockertools.DockerPrefix
|
||||
// DockerPullableImageIDPrefix is the prefix of pullable image id in container status.
|
||||
DockerPullableImageIDPrefix = dockertools.DockerPullablePrefix
|
||||
)
|
||||
|
||||
func makeSandboxName(s *runtimeapi.PodSandboxConfig) string {
|
||||
return strings.Join([]string{
|
||||
kubePrefix, // 0
|
||||
sandboxContainerName, // 1
|
||||
s.Metadata.GetName(), // 2
|
||||
s.Metadata.GetNamespace(), // 3
|
||||
s.Metadata.GetUid(), // 4
|
||||
fmt.Sprintf("%d", s.Metadata.GetAttempt()), // 5
|
||||
}, nameDelimiter)
|
||||
}
|
||||
|
||||
func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerConfig) string {
|
||||
return strings.Join([]string{
|
||||
kubePrefix, // 0
|
||||
c.Metadata.GetName(), // 1: container name
|
||||
s.Metadata.GetName(), // 2: sandbox name
|
||||
s.Metadata.GetNamespace(), // 3: sandbox namespace
|
||||
s.Metadata.GetUid(), // 4: sandbox uid
|
||||
fmt.Sprintf("%d", c.Metadata.GetAttempt()), // 5
|
||||
}, nameDelimiter)
|
||||
|
||||
}
|
||||
|
||||
func parseUint32(s string) (uint32, error) {
|
||||
n, err := strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(n), nil
|
||||
}
|
||||
|
||||
// TODO: Evaluate whether we should rely on labels completely.
|
||||
func parseSandboxName(name string) (*runtimeapi.PodSandboxMetadata, error) {
|
||||
// Docker adds a "/" prefix to names. so trim it.
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
|
||||
parts := strings.Split(name, nameDelimiter)
|
||||
if len(parts) != 6 {
|
||||
return nil, fmt.Errorf("failed to parse the sandbox name: %q", name)
|
||||
}
|
||||
if parts[0] != kubePrefix {
|
||||
return nil, fmt.Errorf("container is not managed by kubernetes: %q", name)
|
||||
}
|
||||
|
||||
attempt, err := parseUint32(parts[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse the sandbox name %q: %v", name, err)
|
||||
}
|
||||
|
||||
return &runtimeapi.PodSandboxMetadata{
|
||||
Name: &parts[2],
|
||||
Namespace: &parts[3],
|
||||
Uid: &parts[4],
|
||||
Attempt: &attempt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TODO: Evaluate whether we should rely on labels completely.
|
||||
func parseContainerName(name string) (*runtimeapi.ContainerMetadata, error) {
|
||||
// Docker adds a "/" prefix to names. so trim it.
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
|
||||
parts := strings.Split(name, nameDelimiter)
|
||||
if len(parts) != 6 {
|
||||
return nil, fmt.Errorf("failed to parse the container name: %q", name)
|
||||
}
|
||||
if parts[0] != kubePrefix {
|
||||
return nil, fmt.Errorf("container is not managed by kubernetes: %q", name)
|
||||
}
|
||||
|
||||
attempt, err := parseUint32(parts[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse the container name %q: %v", name, err)
|
||||
}
|
||||
|
||||
return &runtimeapi.ContainerMetadata{
|
||||
Name: &parts[1],
|
||||
Attempt: &attempt,
|
||||
}, nil
|
||||
}
|
||||
84 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming_test.go generated vendored Normal file
|
|
@@ -0,0 +1,84 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
func TestSandboxNameRoundTrip(t *testing.T) {
|
||||
config := makeSandboxConfig("foo", "bar", "iamuid", 3)
|
||||
actualName := makeSandboxName(config)
|
||||
assert.Equal(t, "k8s_POD_foo_bar_iamuid_3", actualName)
|
||||
|
||||
actualMetadata, err := parseSandboxName(actualName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, config.Metadata, actualMetadata)
|
||||
}
|
||||
|
||||
func TestNonParsableSandboxNames(t *testing.T) {
|
||||
// All names must start with the kubernetes prefix "k8s".
|
||||
_, err := parseSandboxName("owner_POD_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
|
||||
// All names must contain exactly 6 parts.
|
||||
_, err = parseSandboxName("k8s_POD_dummy_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
_, err = parseSandboxName("k8s_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
|
||||
// The attempt number must parse as a number.
|
||||
_, err = parseSandboxName("k8s_POD_foo_bar_iamuid_notanumber")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestContainerNameRoundTrip(t *testing.T) {
|
||||
sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3)
|
||||
name, attempt := "pause", uint32(5)
|
||||
config := &runtimeapi.ContainerConfig{
|
||||
Metadata: &runtimeapi.ContainerMetadata{
|
||||
Name: &name,
|
||||
Attempt: &attempt,
|
||||
},
|
||||
}
|
||||
actualName := makeContainerName(sConfig, config)
|
||||
assert.Equal(t, "k8s_pause_foo_bar_iamuid_5", actualName)
|
||||
|
||||
actualMetadata, err := parseContainerName(actualName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, config.Metadata, actualMetadata)
|
||||
}
|
||||
|
||||
func TestNonParsableContainerNames(t *testing.T) {
|
||||
// All names must start with the kubernetes prefix "k8s".
|
||||
_, err := parseContainerName("owner_frontend_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
|
||||
// All names must contain exactly 6 parts.
|
||||
_, err = parseContainerName("k8s_frontend_dummy_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
_, err = parseContainerName("k8s_foo_bar_iamuid_4")
|
||||
assert.Error(t, err)
|
||||
|
||||
// The attempt number must parse as a number.
|
||||
_, err = parseContainerName("k8s_frontend_foo_bar_iamuid_notanumber")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
30 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD generated vendored Normal file
|
|
@@ -0,0 +1,30 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_test",
|
||||
"cgo_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"docker_server.go",
|
||||
"docker_service.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/kubelet/api:go_default_library",
|
||||
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/dockershim:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/interrupt:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:golang.org/x/net/context",
|
||||
"//vendor:google.golang.org/grpc",
|
||||
],
|
||||
)
|
||||
89 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go generated vendored Normal file
|
|
@@ -0,0 +1,89 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim"
|
||||
"k8s.io/kubernetes/pkg/util/interrupt"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultAddress is the default address of the dockershim grpc server socket.
|
||||
defaultAddress = "/var/run/dockershim.sock"
|
||||
// unixProtocol is the network protocol of unix socket.
|
||||
unixProtocol = "unix"
|
||||
)
|
||||
|
||||
// DockerServer is the grpc server of dockershim.
|
||||
type DockerServer struct {
|
||||
// addr is the address to serve on.
|
||||
addr string
|
||||
// service is the docker service which implements runtime and image services.
|
||||
service DockerService
|
||||
// server is the grpc server.
|
||||
server *grpc.Server
|
||||
}
|
||||
|
||||
// NewDockerServer creates the dockershim grpc server.
|
||||
func NewDockerServer(addr string, s dockershim.DockerService) *DockerServer {
|
||||
return &DockerServer{
|
||||
addr: addr,
|
||||
service: NewDockerService(s),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the dockershim grpc server.
|
||||
func (s *DockerServer) Start() error {
|
||||
glog.V(2).Infof("Start dockershim grpc server")
|
||||
// Unlink to cleanup the previous socket file.
|
||||
err := syscall.Unlink(s.addr)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to unlink socket file %q: %v", s.addr, err)
|
||||
}
|
||||
l, err := net.Listen(unixProtocol, s.addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to listen on %q: %v", s.addr, err)
|
||||
}
|
||||
// Create the grpc server and register runtime and image services.
|
||||
s.server = grpc.NewServer()
|
||||
runtimeapi.RegisterRuntimeServiceServer(s.server, s.service)
|
||||
runtimeapi.RegisterImageServiceServer(s.server, s.service)
|
||||
go func() {
|
||||
// Use the interrupt handler to make sure the server is stopped properly.
|
||||
h := interrupt.New(nil, s.Stop)
|
||||
err := h.Run(func() error { return s.server.Serve(l) })
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to serve connections: %v", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the dockershim grpc server.
|
||||
func (s *DockerServer) Stop() {
|
||||
glog.V(2).Infof("Stop docker server")
|
||||
s.server.Stop()
|
||||
}
|
||||
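// Illustrative usage sketch, not part of the vendored file above: assuming the
// kubelet has already constructed a dockershim.DockerService instance `ds`,
// wiring up the shim's gRPC endpoint looks roughly like this. The package name,
// import aliases, and function name are made up for the example.
package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/kubelet/dockershim"
	dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote"
)

func startDockershim(ds dockershim.DockerService) {
	// Serve on the same default socket that docker_server.go documents
	// ("/var/run/dockershim.sock").
	server := dockerremote.NewDockerServer("/var/run/dockershim.sock", ds)
	if err := server.Start(); err != nil {
		glog.Fatalf("Failed to start dockershim grpc server: %v", err)
	}
	// Start returns immediately; the gRPC server keeps serving in a goroutine
	// until Stop is called or the process is interrupted.
}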
216 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go generated vendored Normal file
|
|
@@ -0,0 +1,216 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
)
|
||||
|
||||
// DockerService is the interface that the CRI remote service server implements.
|
||||
type DockerService interface {
|
||||
runtimeapi.RuntimeServiceServer
|
||||
runtimeapi.ImageServiceServer
|
||||
}
|
||||
|
||||
// dockerService uses dockershim service to implement DockerService.
|
||||
// Note that the contexts in these functions are currently unused.
|
||||
// TODO(random-liu): Change the dockershim service to support context, and implement
|
||||
// internal services and remote services with the dockershim service.
|
||||
type dockerService struct {
|
||||
runtimeService internalapi.RuntimeService
|
||||
imageService internalapi.ImageManagerService
|
||||
}
|
||||
|
||||
func NewDockerService(s dockershim.DockerService) DockerService {
|
||||
return &dockerService{runtimeService: s, imageService: s}
|
||||
}
|
||||
|
||||
func (d *dockerService) Version(ctx context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
|
||||
return d.runtimeService.Version(r.GetVersion())
|
||||
}
|
||||
|
||||
func (d *dockerService) Status(ctx context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
|
||||
status, err := d.runtimeService.Status()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
|
||||
podSandboxId, err := d.runtimeService.RunPodSandbox(r.GetConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
|
||||
err := d.runtimeService.StopPodSandbox(r.GetPodSandboxId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StopPodSandboxResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
|
||||
err := d.runtimeService.RemovePodSandbox(r.GetPodSandboxId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemovePodSandboxResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
|
||||
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.GetPodSandboxId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
|
||||
items, err := d.runtimeService.ListPodSandbox(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListPodSandboxResponse{Items: items}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
|
||||
containerId, err := d.runtimeService.CreateContainer(r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.CreateContainerResponse{ContainerId: &containerId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
|
||||
err := d.runtimeService.StartContainer(r.GetContainerId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StartContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
|
||||
err := d.runtimeService.StopContainer(r.GetContainerId(), r.GetTimeout())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StopContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
|
||||
err := d.runtimeService.RemoveContainer(r.GetContainerId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemoveContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListContainers(ctx context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
|
||||
containers, err := d.runtimeService.ListContainers(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListContainersResponse{Containers: containers}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
|
||||
status, err := d.runtimeService.ContainerStatus(r.GetContainerId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ContainerStatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
|
||||
stdout, stderr, err := d.runtimeService.ExecSync(r.GetContainerId(), r.GetCmd(), time.Duration(r.GetTimeout())*time.Second)
|
||||
var exitCode int32
|
||||
if err != nil {
|
||||
exitError, ok := err.(utilexec.ExitError)
|
||||
if !ok {
|
||||
return nil, err
|
||||
}
|
||||
exitCode = int32(exitError.ExitStatus())
|
||||
}
|
||||
return &runtimeapi.ExecSyncResponse{
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ExitCode: &exitCode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) Exec(ctx context.Context, r *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
|
||||
return d.runtimeService.Exec(r)
|
||||
}
|
||||
|
||||
func (d *dockerService) Attach(ctx context.Context, r *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
|
||||
return d.runtimeService.Attach(r)
|
||||
}
|
||||
|
||||
func (d *dockerService) PortForward(ctx context.Context, r *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
|
||||
return d.runtimeService.PortForward(r)
|
||||
}
|
||||
|
||||
func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
|
||||
err := d.runtimeService.UpdateRuntimeConfig(r.GetRuntimeConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListImages(ctx context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
|
||||
images, err := d.imageService.ListImages(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListImagesResponse{Images: images}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
|
||||
image, err := d.imageService.ImageStatus(r.GetImage())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ImageStatusResponse{Image: image}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) PullImage(ctx context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
|
||||
err := d.imageService.PullImage(r.GetImage(), r.GetAuth())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.PullImageResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
|
||||
err := d.imageService.RemoveImage(r.GetImage())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemoveImageResponse{}, nil
|
||||
}
|
||||
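// Illustrative client-side sketch, not part of the vendored file above: once
// DockerServer has registered this service, a CRI client talks to it through
// the generated gRPC stubs. Connection setup is elided; assume `conn` is a
// *grpc.ClientConn already dialed against the dockershim unix socket, and the
// version string is just a placeholder for the example.
package example

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func printRuntimeVersion(conn *grpc.ClientConn) error {
	client := runtimeapi.NewRuntimeServiceClient(conn)
	kubeAPIVersion := "0.1.0" // assumed CRI version string
	resp, err := client.Version(context.Background(), &runtimeapi.VersionRequest{Version: &kubeAPIVersion})
	if err != nil {
		return err
	}
	// The response is produced by dockerService.Version above, which delegates
	// to the dockershim runtime service.
	fmt.Printf("runtime: %s %s\n", resp.GetRuntimeName(), resp.GetRuntimeVersion())
	return nil
}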
159 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go generated vendored Normal file
|
|
@@ -0,0 +1,159 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
)
|
||||
|
||||
// applySandboxSecurityContext updates docker sandbox options according to security context.
|
||||
func applySandboxSecurityContext(lc *runtimeapi.LinuxPodSandboxConfig, config *dockercontainer.Config, hc *dockercontainer.HostConfig) {
|
||||
if lc == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var sc *runtimeapi.LinuxContainerSecurityContext
|
||||
if lc.SecurityContext != nil {
|
||||
sc = &runtimeapi.LinuxContainerSecurityContext{
|
||||
SupplementalGroups: lc.SecurityContext.SupplementalGroups,
|
||||
RunAsUser: lc.SecurityContext.RunAsUser,
|
||||
ReadonlyRootfs: lc.SecurityContext.ReadonlyRootfs,
|
||||
SelinuxOptions: lc.SecurityContext.SelinuxOptions,
|
||||
NamespaceOptions: lc.SecurityContext.NamespaceOptions,
|
||||
}
|
||||
}
|
||||
|
||||
modifyContainerConfig(sc, config)
|
||||
modifyHostConfig(sc, "", hc)
|
||||
}
|
||||
|
||||
// applyContainerSecurityContext updates docker container options according to security context.
|
||||
func applyContainerSecurityContext(lc *runtimeapi.LinuxContainerConfig, sandboxID string, config *dockercontainer.Config, hc *dockercontainer.HostConfig) {
|
||||
if lc == nil {
|
||||
return
|
||||
}
|
||||
|
||||
modifyContainerConfig(lc.SecurityContext, config)
|
||||
modifyHostConfig(lc.SecurityContext, sandboxID, hc)
|
||||
return
|
||||
}
|
||||
|
||||
// modifyContainerConfig applies container security context config to dockercontainer.Config.
|
||||
func modifyContainerConfig(sc *runtimeapi.LinuxContainerSecurityContext, config *dockercontainer.Config) {
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
if sc.RunAsUser != nil {
|
||||
config.User = strconv.FormatInt(sc.GetRunAsUser(), 10)
|
||||
}
|
||||
if sc.RunAsUsername != nil {
|
||||
config.User = sc.GetRunAsUsername()
|
||||
}
|
||||
}
|
||||
|
||||
// modifyHostConfig applies security context config to dockercontainer.HostConfig.
|
||||
func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, sandboxID string, hostConfig *dockercontainer.HostConfig) {
|
||||
// Apply namespace options.
|
||||
modifyNamespaceOptions(sc.GetNamespaceOptions(), sandboxID, hostConfig)
|
||||
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Apply supplemental groups.
|
||||
for _, group := range sc.SupplementalGroups {
|
||||
hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.FormatInt(group, 10))
|
||||
}
|
||||
|
||||
// Apply security context for the container.
|
||||
if sc.Privileged != nil {
|
||||
hostConfig.Privileged = sc.GetPrivileged()
|
||||
}
|
||||
if sc.ReadonlyRootfs != nil {
|
||||
hostConfig.ReadonlyRootfs = sc.GetReadonlyRootfs()
|
||||
}
|
||||
if sc.Capabilities != nil {
|
||||
hostConfig.CapAdd = sc.GetCapabilities().GetAddCapabilities()
|
||||
hostConfig.CapDrop = sc.GetCapabilities().GetDropCapabilities()
|
||||
}
|
||||
if sc.SelinuxOptions != nil {
|
||||
hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions(
|
||||
hostConfig.SecurityOpt,
|
||||
&v1.SELinuxOptions{
|
||||
User: sc.SelinuxOptions.GetUser(),
|
||||
Role: sc.SelinuxOptions.GetRole(),
|
||||
Type: sc.SelinuxOptions.GetType(),
|
||||
Level: sc.SelinuxOptions.GetLevel(),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// modifyNamespaceOptions applies namespace options to dockercontainer.HostConfig.
|
||||
func modifyNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, sandboxID string, hostConfig *dockercontainer.HostConfig) {
|
||||
hostNetwork := false
|
||||
if nsOpts != nil {
|
||||
if nsOpts.HostNetwork != nil {
|
||||
hostNetwork = nsOpts.GetHostNetwork()
|
||||
}
|
||||
if nsOpts.GetHostPid() {
|
||||
hostConfig.PidMode = namespaceModeHost
|
||||
}
|
||||
if nsOpts.GetHostIpc() {
|
||||
hostConfig.IpcMode = namespaceModeHost
|
||||
}
|
||||
}
|
||||
|
||||
// Set for sandbox if sandboxID is not provided.
|
||||
if sandboxID == "" {
|
||||
modifyHostNetworkOptionForSandbox(hostNetwork, hostConfig)
|
||||
} else {
|
||||
// Set for container if sandboxID is provided.
|
||||
modifyHostNetworkOptionForContainer(hostNetwork, sandboxID, hostConfig)
|
||||
}
|
||||
}
|
||||
|
||||
// modifyHostNetworkOptionForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
|
||||
func modifyHostNetworkOptionForSandbox(hostNetwork bool, hc *dockercontainer.HostConfig) {
|
||||
if hostNetwork {
|
||||
hc.NetworkMode = namespaceModeHost
|
||||
} else {
|
||||
// Assume kubelet uses either the cni or the kubenet plugin.
|
||||
// TODO: support docker networking.
|
||||
hc.NetworkMode = "none"
|
||||
}
|
||||
}
|
||||
|
||||
// modifyHostNetworkOptionForContainer applies NetworkMode/UTSMode to container's dockercontainer.HostConfig.
|
||||
func modifyHostNetworkOptionForContainer(hostNetwork bool, sandboxID string, hc *dockercontainer.HostConfig) {
|
||||
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
|
||||
hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
|
||||
hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
|
||||
hc.UTSMode = ""
|
||||
hc.PidMode = ""
|
||||
|
||||
if hostNetwork {
|
||||
hc.UTSMode = namespaceModeHost
|
||||
}
|
||||
}
|
||||
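// Illustrative sketch, not part of the vendored file above: how the helpers in
// security_context.go compose when the shim creates a container. Written as if
// it lived in package dockershim, since the helpers are unexported; the UID and
// sandbox ID are made up for the example.
package dockershim

import (
	dockercontainer "github.com/docker/engine-api/types/container"

	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func exampleApplyContainerSecurityContext() (*dockercontainer.Config, *dockercontainer.HostConfig) {
	uid := int64(1000)
	lc := &runtimeapi.LinuxContainerConfig{
		SecurityContext: &runtimeapi.LinuxContainerSecurityContext{RunAsUser: &uid},
	}
	config := &dockercontainer.Config{}
	hc := &dockercontainer.HostConfig{}
	// Sets config.User to "1000" and, because a sandbox ID is given, points the
	// container's network/IPC namespaces at "container:<sandboxID>".
	applyContainerSecurityContext(lc, "sandboxID", config, hc)
	return config, hc
}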
275 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go generated vendored Normal file
|
|
@@ -0,0 +1,275 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
)
|
||||
|
||||
func TestModifyContainerConfig(t *testing.T) {
|
||||
var uid int64 = 123
|
||||
var username string = "testuser"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
sc *runtimeapi.LinuxContainerSecurityContext
|
||||
expected *dockercontainer.Config
|
||||
}{
|
||||
{
|
||||
name: "container.SecurityContext.RunAsUser set",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
RunAsUser: &uid,
|
||||
},
|
||||
expected: &dockercontainer.Config{
|
||||
User: strconv.FormatInt(uid, 10),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.RunAsUsername set",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
RunAsUsername: &username,
|
||||
},
|
||||
expected: &dockercontainer.Config{
|
||||
User: username,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no RunAsUser value set",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{},
|
||||
expected: &dockercontainer.Config{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
dockerCfg := &dockercontainer.Config{}
|
||||
modifyContainerConfig(tc.sc, dockerCfg)
|
||||
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyHostConfig(t *testing.T) {
|
||||
priv := true
|
||||
setNetworkHC := &dockercontainer.HostConfig{
|
||||
NetworkMode: "none",
|
||||
}
|
||||
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
|
||||
setPrivSC.Privileged = &priv
|
||||
setPrivHC := &dockercontainer.HostConfig{
|
||||
Privileged: true,
|
||||
NetworkMode: "none",
|
||||
}
|
||||
setCapsHC := &dockercontainer.HostConfig{
|
||||
NetworkMode: "none",
|
||||
CapAdd: []string{"addCapA", "addCapB"},
|
||||
CapDrop: []string{"dropCapA", "dropCapB"},
|
||||
}
|
||||
setSELinuxHC := &dockercontainer.HostConfig{
|
||||
NetworkMode: "none",
|
||||
SecurityOpt: []string{
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser, "user"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole, "role"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelType, "type"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel, "level"),
|
||||
},
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
sc *runtimeapi.LinuxContainerSecurityContext
|
||||
expected *dockercontainer.HostConfig
|
||||
}{
|
||||
{
|
||||
name: "fully set container.SecurityContext",
|
||||
sc: fullValidSecurityContext(),
|
||||
expected: fullValidHostConfig(),
|
||||
},
|
||||
{
|
||||
name: "empty container.SecurityContext",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{},
|
||||
expected: setNetworkHC,
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.Privileged",
|
||||
sc: setPrivSC,
|
||||
expected: setPrivHC,
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.Capabilities",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
Capabilities: inputCapabilities(),
|
||||
},
|
||||
expected: setCapsHC,
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.SELinuxOptions",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
SelinuxOptions: inputSELinuxOptions(),
|
||||
},
|
||||
expected: setSELinuxHC,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
dockerCfg := &dockercontainer.HostConfig{}
|
||||
modifyHostConfig(tc.sc, "", dockerCfg)
|
||||
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyHostConfigWithGroups(t *testing.T) {
|
||||
supplementalGroupsSC := &runtimeapi.LinuxContainerSecurityContext{}
|
||||
supplementalGroupsSC.SupplementalGroups = []int64{2222}
|
||||
supplementalGroupHC := &dockercontainer.HostConfig{NetworkMode: "none"}
|
||||
supplementalGroupHC.GroupAdd = []string{"2222"}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
securityContext *runtimeapi.LinuxContainerSecurityContext
|
||||
expected *dockercontainer.HostConfig
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
securityContext: nil,
|
||||
expected: &dockercontainer.HostConfig{NetworkMode: "none"},
|
||||
},
|
||||
{
|
||||
name: "SupplementalGroup",
|
||||
securityContext: supplementalGroupsSC,
|
||||
expected: supplementalGroupHC,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
dockerCfg := &dockercontainer.HostConfig{}
|
||||
modifyHostConfig(tc.securityContext, "", dockerCfg)
|
||||
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyHostConfigWithSandboxID(t *testing.T) {
|
||||
priv := true
|
||||
sandboxID := "sandbox"
|
||||
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
|
||||
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
|
||||
setPrivSC.Privileged = &priv
|
||||
setPrivHC := &dockercontainer.HostConfig{
|
||||
Privileged: true,
|
||||
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
}
|
||||
setCapsHC := &dockercontainer.HostConfig{
|
||||
CapAdd: []string{"addCapA", "addCapB"},
|
||||
CapDrop: []string{"dropCapA", "dropCapB"},
|
||||
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
}
|
||||
setSELinuxHC := &dockercontainer.HostConfig{
|
||||
SecurityOpt: []string{
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser, "user"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole, "role"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelType, "type"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel, "level"),
|
||||
},
|
||||
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
sc *runtimeapi.LinuxContainerSecurityContext
|
||||
expected *dockercontainer.HostConfig
|
||||
}{
|
||||
{
|
||||
name: "container.SecurityContext.Privileged",
|
||||
sc: setPrivSC,
|
||||
expected: setPrivHC,
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.Capabilities",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
Capabilities: inputCapabilities(),
|
||||
},
|
||||
expected: setCapsHC,
|
||||
},
|
||||
{
|
||||
name: "container.SecurityContext.SELinuxOptions",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
SelinuxOptions: inputSELinuxOptions(),
|
||||
},
|
||||
expected: setSELinuxHC,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
dockerCfg := &dockercontainer.HostConfig{}
|
||||
modifyHostConfig(tc.sc, sandboxID, dockerCfg)
|
||||
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
|
||||
}
|
||||
}
|
||||
|
||||
func fullValidSecurityContext() *runtimeapi.LinuxContainerSecurityContext {
|
||||
priv := true
|
||||
return &runtimeapi.LinuxContainerSecurityContext{
|
||||
Privileged: &priv,
|
||||
Capabilities: inputCapabilities(),
|
||||
SelinuxOptions: inputSELinuxOptions(),
|
||||
}
|
||||
}
|
||||
|
||||
func inputCapabilities() *runtimeapi.Capability {
|
||||
return &runtimeapi.Capability{
|
||||
AddCapabilities: []string{"addCapA", "addCapB"},
|
||||
DropCapabilities: []string{"dropCapA", "dropCapB"},
|
||||
}
|
||||
}
|
||||
|
||||
func inputSELinuxOptions() *runtimeapi.SELinuxOption {
|
||||
user := "user"
|
||||
role := "role"
|
||||
stype := "type"
|
||||
level := "level"
|
||||
|
||||
return &runtimeapi.SELinuxOption{
|
||||
User: &user,
|
||||
Role: &role,
|
||||
Type: &stype,
|
||||
Level: &level,
|
||||
}
|
||||
}
|
||||
|
||||
func fullValidHostConfig() *dockercontainer.HostConfig {
|
||||
return &dockercontainer.HostConfig{
|
||||
Privileged: true,
|
||||
NetworkMode: "none",
|
||||
CapAdd: []string{"addCapA", "addCapB"},
|
||||
CapDrop: []string{"dropCapA", "dropCapB"},
|
||||
SecurityOpt: []string{
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser, "user"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole, "role"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelType, "type"),
|
||||
fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel, "level"),
|
||||
},
|
||||
}
|
||||
}
|
||||