
Add glide.yaml and vendor deps

This commit is contained in:
Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

98 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD generated vendored Normal file

@@ -0,0 +1,98 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"fake_kuberuntime_manager.go",
"helpers.go",
"instrumented_services.go",
"kuberuntime_container.go",
"kuberuntime_gc.go",
"kuberuntime_image.go",
"kuberuntime_logs.go",
"kuberuntime_manager.go",
"kuberuntime_sandbox.go",
"labels.go",
"legacy.go",
"security_context.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/credentialprovider:go_default_library",
"//pkg/kubelet/api:go_default_library",
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockertools:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/qos:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/parsers:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/selinux:go_default_library",
"//pkg/util/sets:go_default_library",
"//vendor:github.com/coreos/go-semver/semver",
"//vendor:github.com/docker/docker/pkg/jsonlog",
"//vendor:github.com/fsnotify/fsnotify",
"//vendor:github.com/golang/glog",
"//vendor:github.com/google/cadvisor/info/v1",
],
)
go_test(
name = "go_default_test",
srcs = [
"helpers_test.go",
"kuberuntime_container_test.go",
"kuberuntime_gc_test.go",
"kuberuntime_image_test.go",
"kuberuntime_logs_test.go",
"kuberuntime_manager_test.go",
"kuberuntime_sandbox_test.go",
"labels_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/kubelet/api/testing:go_default_library",
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/testing:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/sets:go_default_library",
"//vendor:github.com/golang/mock/gomock",
"//vendor:github.com/google/cadvisor/info/v1",
"//vendor:github.com/stretchr/testify/assert",
],
)


@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kuberuntime contains an implementation of kubecontainer.Runtime using
// the interface in pkg/kubelet/v1.
package kuberuntime


@@ -0,0 +1,131 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"io/ioutil"
"net/http"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/network"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/types"
kubetypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
)
type fakeHTTP struct {
url string
err error
}
func (f *fakeHTTP) Get(url string) (*http.Response, error) {
f.url = url
return nil, f.err
}
// fakeRuntimeHelper implements kubecontainer.RuntimeHelper interfaces for testing purposes.
type fakeRuntimeHelper struct{}
func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var opts kubecontainer.RunContainerOptions
if len(container.TerminationMessagePath) != 0 {
testPodContainerDir, err := ioutil.TempDir("", "fooPodContainerDir")
if err != nil {
return nil, err
}
opts.PodContainerDir = testPodContainerDir
}
return &opts, nil
}
func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
return nil, nil, nil
}
// This is not used by docker runtime.
func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
return "", "", nil
}
func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string {
return ""
}
func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return nil
}
type fakePodGetter struct {
pods map[types.UID]*v1.Pod
}
func newFakePodGetter() *fakePodGetter {
return &fakePodGetter{make(map[types.UID]*v1.Pod)}
}
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
pod, found := f.pods[uid]
return pod, found
}
func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
recorder := &record.FakeRecorder{}
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
cpuCFSQuota: false,
livenessManager: proberesults.NewManager(),
containerRefManager: kubecontainer.NewRefManager(),
machineInfo: machineInfo,
osInterface: osInterface,
networkPlugin: networkPlugin,
runtimeHelper: &fakeRuntimeHelper{},
runtimeService: runtimeService,
imageService: imageService,
keyring: credentialprovider.NewDockerKeyring(),
}
typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil {
return nil, err
}
kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
kubeRuntimeManager,
flowcontrol.NewBackOff(time.Second, 300*time.Second),
false,
0, // Disable image pull throttling by setting QPS to 0,
0,
)
kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
&fakeHTTP{},
kubeRuntimeManager,
kubeRuntimeManager)
return kubeRuntimeManager, nil
}


@@ -0,0 +1,236 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"path/filepath"
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100 * minQuotaPeriod
minQuotaPeriod = 1000
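// In these (microsecond) units, quotaPeriod = 100 * 1000 = 100000 (100ms) and
// minQuotaPeriod = 1000 (1ms), the floor applied in milliCPUToQuota below.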
)
var (
// The default dns opt strings
defaultDNSOptions = []string{"ndots:5"}
)
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
// Newest first.
type podSandboxByCreated []*runtimeapi.PodSandbox
func (p podSandboxByCreated) Len() int { return len(p) }
func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podSandboxByCreated) Less(i, j int) bool { return p[i].GetCreatedAt() > p[j].GetCreatedAt() }
type containerStatusByCreated []*kubecontainer.ContainerStatus
func (c containerStatusByCreated) Len() int { return len(c) }
func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.ContainerState.
func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.ContainerState {
switch state {
case runtimeapi.ContainerState_CONTAINER_CREATED:
return kubecontainer.ContainerStateCreated
case runtimeapi.ContainerState_CONTAINER_RUNNING:
return kubecontainer.ContainerStateRunning
case runtimeapi.ContainerState_CONTAINER_EXITED:
return kubecontainer.ContainerStateExited
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
return kubecontainer.ContainerStateUnknown
}
return kubecontainer.ContainerStateUnknown
}
// toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol.
func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
switch protocol {
case v1.ProtocolTCP:
return runtimeapi.Protocol_TCP
case v1.ProtocolUDP:
return runtimeapi.Protocol_UDP
}
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
return runtimeapi.Protocol_TCP
}
// toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == nil || c.Image == nil || c.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
labeledInfo := getContainerInfoFromLabels(c.Labels)
annotatedInfo := getContainerInfoFromAnnotations(c.Annotations)
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.GetId()},
Name: labeledInfo.ContainerName,
Image: c.Image.GetImage(),
Hash: annotatedInfo.Hash,
State: toKubeContainerState(c.GetState()),
}, nil
}
// sandboxToKubeContainer converts runtimeapi.PodSandbox to kubecontainer.Container.
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
if s == nil || s.Id == nil || s.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.GetId()},
State: kubecontainer.SandboxToContainerState(s.GetState()),
}, nil
}
// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image})
if err != nil {
return nil, nil, err
}
if imageStatus != nil && imageStatus.Uid != nil {
// If uid is set, return uid.
return imageStatus.Uid, nil, nil
}
if imageStatus != nil && imageStatus.Username != nil {
// If uid is not set, but user name is set, return user name.
return nil, imageStatus.Username, nil
}
// If none of them is set, treat it as root.
return new(int64), nil, nil
}
// isContainerFailed returns true if container has exited and exitcode is not zero.
func isContainerFailed(status *kubecontainer.ContainerStatus) bool {
if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
return true
}
return false
}
// milliCPUToShares converts milliCPU to CPU shares
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Return 2 here to really match kernel default for zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
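// Worked example (illustrative): 500 milliCPU -> (500 * 1024) / 1000 = 512 shares.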
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
if milliCPU == 0 {
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
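// Worked example (illustrative): 250 milliCPU -> (250 * 100000) / 1000 = 25000us
// of CPU time per 100ms period, i.e. 25% of a single CPU.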
// quota needs to be a minimum of 1ms.
if quota < minQuotaPeriod {
quota = minQuotaPeriod
}
return
}
// getStableKey generates a key (string) to uniquely identify a
// (pod, container) tuple. The key should include the content of the
// container, so that any change to the container generates a new key.
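// For illustration (hypothetical values): a pod "nginx" in namespace "default" with
// UID "uid-1" and a container "web" whose spec hashes to 5f3a yields the key
// "nginx_default_uid-1_web_5f3a".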
func getStableKey(pod *v1.Pod, container *v1.Container) string {
hash := strconv.FormatUint(kubecontainer.HashContainer(container), 16)
return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash)
}
// buildContainerLogsPath builds log path for container relative to pod logs directory.
func buildContainerLogsPath(containerName string, restartCount int) string {
return fmt.Sprintf("%s_%d.log", containerName, restartCount)
}
// buildFullContainerLogsPath builds absolute log path for container.
func buildFullContainerLogsPath(podUID types.UID, containerName string, restartCount int) string {
return filepath.Join(buildPodLogsDirectory(podUID), buildContainerLogsPath(containerName, restartCount))
}
// buildPodLogsDirectory builds absolute log directory path for a pod sandbox.
func buildPodLogsDirectory(podUID types.UID) string {
return filepath.Join(podLogsRootDirectory, string(podUID))
}
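// Taken together, the resulting layout (assuming podLogsRootDirectory is the
// kubelet's pod log root, defined elsewhere in this package) is:
// <podLogsRootDirectory>/<podUID>/<containerName>_<restartCount>.log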
// toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus.
func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.RuntimeStatus {
conditions := []kubecontainer.RuntimeCondition{}
for _, c := range status.GetConditions() {
conditions = append(conditions, kubecontainer.RuntimeCondition{
Type: kubecontainer.RuntimeConditionType(c.GetType()),
Status: c.GetStatus(),
Reason: c.GetReason(),
Message: c.GetMessage(),
})
}
return &kubecontainer.RuntimeStatus{Conditions: conditions}
}


@@ -0,0 +1,47 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestStableKey(t *testing.T) {
container := &v1.Container{
Name: "test_container",
Image: "foo/image:v1",
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
},
}
oldKey := getStableKey(pod, container)
// Updating the container image should change the key.
container.Image = "foo/image:v2"
newKey := getStableKey(pod, container)
assert.NotEqual(t, oldKey, newKey)
}


@@ -0,0 +1,258 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"time"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
// instrumentedRuntimeService wraps the RuntimeService and records the operations
// and errors metrics.
type instrumentedRuntimeService struct {
service internalapi.RuntimeService
}
// Creates an instrumented RuntimeService from an existing RuntimeService.
func NewInstrumentedRuntimeService(service internalapi.RuntimeService) internalapi.RuntimeService {
return &instrumentedRuntimeService{service: service}
}
// instrumentedImageManagerService wraps the ImageManagerService and records the operations
// and errors metrics.
type instrumentedImageManagerService struct {
service internalapi.ImageManagerService
}
// Creates an instrumented ImageManagerService from an existing ImageManagerService.
func NewInstrumentedImageManagerService(service internalapi.ImageManagerService) internalapi.ImageManagerService {
return &instrumentedImageManagerService{service: service}
}
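// Illustrative usage (a sketch; the variable names are hypothetical): the caller
// wraps the raw CRI clients once and then uses only the wrappers, so every call is
// counted and timed, and failures increment the error counter:
//
//	runtimeService := NewInstrumentedRuntimeService(rawRuntimeService)
//	imageService := NewInstrumentedImageManagerService(rawImageService)
//	status, err := runtimeService.Status() // recorded under operation "status"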
// recordOperation records the duration of the operation.
func recordOperation(operation string, start time.Time) {
metrics.RuntimeOperations.WithLabelValues(operation).Inc()
metrics.RuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
}
// recordError records error for metric if an error occurred.
func recordError(operation string, err error) {
if err != nil {
metrics.RuntimeOperationsErrors.WithLabelValues(operation).Inc()
}
}
func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
const operation = "version"
defer recordOperation(operation, time.Now())
out, err := in.service.Version(apiVersion)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
const operation = "status"
defer recordOperation(operation, time.Now())
out, err := in.service.Status()
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "create_container"
defer recordOperation(operation, time.Now())
out, err := in.service.CreateContainer(podSandboxID, config, sandboxConfig)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) StartContainer(containerID string) error {
const operation = "start_container"
defer recordOperation(operation, time.Now())
err := in.service.StartContainer(containerID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) StopContainer(containerID string, timeout int64) error {
const operation = "stop_container"
defer recordOperation(operation, time.Now())
err := in.service.StopContainer(containerID, timeout)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) RemoveContainer(containerID string) error {
const operation = "remove_container"
defer recordOperation(operation, time.Now())
err := in.service.RemoveContainer(containerID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
const operation = "list_containers"
defer recordOperation(operation, time.Now())
out, err := in.service.ListContainers(filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
const operation = "container_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ContainerStatus(containerID)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
const operation = "exec_sync"
defer recordOperation(operation, time.Now())
stdout, stderr, err := in.service.ExecSync(containerID, cmd, timeout)
recordError(operation, err)
return stdout, stderr, err
}
func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
const operation = "exec"
defer recordOperation(operation, time.Now())
resp, err := in.service.Exec(req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach"
defer recordOperation(operation, time.Now())
resp, err := in.service.Attach(req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "run_podsandbox"
defer recordOperation(operation, time.Now())
out, err := in.service.RunPodSandbox(config)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) StopPodSandbox(podSandboxID string) error {
const operation = "stop_podsandbox"
defer recordOperation(operation, time.Now())
err := in.service.StopPodSandbox(podSandboxID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error {
const operation = "remove_podsandbox"
defer recordOperation(operation, time.Now())
err := in.service.RemovePodSandbox(podSandboxID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
const operation = "podsandbox_status"
defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStatus(podSandboxID)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox"
defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandbox(filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward"
defer recordOperation(operation, time.Now())
resp, err := in.service.PortForward(req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config"
defer recordOperation(operation, time.Now())
err := in.service.UpdateRuntimeConfig(runtimeConfig)
recordError(operation, err)
return err
}
func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images"
defer recordOperation(operation, time.Now())
out, err := in.service.ListImages(filter)
recordError(operation, err)
return out, err
}
func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
const operation = "image_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ImageStatus(image)
recordError(operation, err)
return out, err
}
func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
const operation = "pull_image"
defer recordOperation(operation, time.Now())
err := in.service.PullImage(image, auth)
recordError(operation, err)
return err
}
func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error {
const operation = "remove_image"
defer recordOperation(operation, time.Now())
err := in.service.RemoveImage(image)
recordError(operation, err)
return err
}

File diff suppressed because it is too large.


@@ -0,0 +1,66 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
// TestRemoveContainer tests removing the container and its corresponding container logs.
func TestRemoveContainer(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
// Create fake sandbox and container
_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Equal(t, len(fakeContainers), 1)
containerId := fakeContainers[0].GetId()
fakeOS := m.osInterface.(*containertest.FakeOS)
err = m.removeContainer(containerId)
assert.NoError(t, err)
// Verify container log is removed
expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "12345678", "foo_0.log")
expectedContainerLogSymlink := legacyLogSymlink(containerId, "foo", "bar", "new")
assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
// Verify container is removed
fakeRuntime.AssertCalls([]string{"RemoveContainer"})
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: &containerId})
assert.NoError(t, err)
assert.Empty(t, containers)
}


@@ -0,0 +1,351 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"os"
"path/filepath"
"sort"
"time"
"github.com/golang/glog"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
// sandboxMinGCAge is the minimum age for an empty sandbox before it is garbage collected.
// This is introduced to avoid a sandbox being garbage collected before its containers are
// created.
// Notice that if the first container of a sandbox is created too late (exceeds sandboxMinGCAge),
// the sandbox could still be garbage collected. In that case, SyncPod will recreate the
// sandbox and make sure old containers are all stopped.
// In the following figure, 'o' is a stopped sandbox, 'x' is a removed sandbox. It shows
// that, approximately if a sandbox keeps crashing and MinAge = 1/n GC Period, there will
// be 1/n more sandboxes not garbage collected.
// oooooo|xxxxxx|xxxxxx| <--- MinAge = 0
// gc gc gc gc
// oooooo|oooxxx|xxxxxx| <--- MinAge = 1/2 GC Period
const sandboxMinGCAge time.Duration = 30 * time.Second
// containerGC is the manager of garbage collection.
type containerGC struct {
client internalapi.RuntimeService
manager *kubeGenericRuntimeManager
podGetter podGetter
}
// NewContainerGC creates a new containerGC.
func NewContainerGC(client internalapi.RuntimeService, podGetter podGetter, manager *kubeGenericRuntimeManager) *containerGC {
return &containerGC{
client: client,
manager: manager,
podGetter: podGetter,
}
}
// containerGCInfo is the internal information kept for containers being considered for GC.
type containerGCInfo struct {
// The ID of the container.
id string
// The name of the container.
name string
// The sandbox ID which this container belongs to
sandboxID string
// Creation time for the container.
createTime time.Time
}
// evictUnit is considered for eviction as units of (UID, container name) pair.
type evictUnit struct {
// UID of the pod.
uid types.UID
// Name of the container in the pod.
name string
}
type containersByEvictUnit map[evictUnit][]containerGCInfo
// NumContainers returns the number of containers in this map.
func (cu containersByEvictUnit) NumContainers() int {
num := 0
for key := range cu {
num += len(cu[key])
}
return num
}
// NumEvictUnits returns the number of pods in this map.
func (cu containersByEvictUnit) NumEvictUnits() int {
return len(cu)
}
// Newest first.
type byCreated []containerGCInfo
func (a byCreated) Len() int { return len(a) }
func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
// enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit.
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) {
for key := range evictUnits {
toRemove := len(evictUnits[key]) - MaxContainers
if toRemove > 0 {
evictUnits[key] = cgc.removeOldestN(evictUnits[key], toRemove)
}
}
}
// removeOldestN removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo {
// Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove
for i := numToKeep; i < len(containers); i++ {
if err := cgc.manager.removeContainer(containers[i].id); err != nil {
glog.Errorf("Failed to remove container %q: %v", containers[i].id, err)
}
}
// Assume we removed the containers so that we're not too aggressive.
return containers[:numToKeep]
}
// removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(sandboxID string) {
glog.V(4).Infof("Removing sandbox %q", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(sandboxID); err != nil {
glog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err)
return
}
if err := cgc.client.RemovePodSandbox(sandboxID); err != nil {
glog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err)
}
}
// isPodDeleted returns true if the pod is already deleted.
func (cgc *containerGC) isPodDeleted(podUID types.UID) bool {
_, found := cgc.podGetter.GetPodByUID(podUID)
return !found
}
// evictableContainers gets all containers that are evictable. Evictable containers are: not running
// and created more than MinAge ago.
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, error) {
containers, err := cgc.manager.getKubeletContainers(true)
if err != nil {
return containersByEvictUnit{}, err
}
evictUnits := make(containersByEvictUnit)
newestGCTime := time.Now().Add(-minAge)
for _, container := range containers {
// Prune out running containers.
if container.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
continue
}
createdAt := time.Unix(0, container.GetCreatedAt())
if newestGCTime.Before(createdAt) {
continue
}
labeledInfo := getContainerInfoFromLabels(container.Labels)
containerInfo := containerGCInfo{
id: container.GetId(),
name: container.Metadata.GetName(),
createTime: createdAt,
sandboxID: container.GetPodSandboxId(),
}
key := evictUnit{
uid: labeledInfo.PodUID,
name: containerInfo.name,
}
evictUnits[key] = append(evictUnits[key], containerInfo)
}
// Sort the containers by age.
for uid := range evictUnits {
sort.Sort(byCreated(evictUnits[uid]))
}
return evictUnits, nil
}
// evict all containers that are evictable
func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
// Separate containers by evict units.
evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge)
if err != nil {
return err
}
// Remove deleted pod containers if all sources are ready.
if allSourcesReady {
for key, unit := range evictUnits {
if cgc.isPodDeleted(key.uid) {
cgc.removeOldestN(unit, len(unit)) // Remove all.
delete(evictUnits, key)
}
}
}
// Enforce max containers per evict unit.
if gcPolicy.MaxPerPodContainer >= 0 {
cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer)
}
// Enforce max total number of containers.
if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
// Leave an equal number of containers per evict unit (min: 1).
numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
if numContainersPerEvictUnit < 1 {
numContainersPerEvictUnit = 1
}
cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit)
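// Worked example (illustrative, mirroring the GC test case that keeps containers
// from every pod): MaxContainers = 6 with 5 evict units trims each unit to
// 6/5 = 1 container, leaving 5 containers, which already satisfies the global limit.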
// If we still need to evict, evict oldest first.
numContainers := evictUnits.NumContainers()
if numContainers > gcPolicy.MaxContainers {
flattened := make([]containerGCInfo, 0, numContainers)
for key := range evictUnits {
flattened = append(flattened, evictUnits[key]...)
}
sort.Sort(byCreated(flattened))
cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers)
}
}
return nil
}
// evictSandboxes evicts all sandboxes that are evictable. Evictable sandboxes are: not running
// and contain no containers at all.
func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
containers, err := cgc.manager.getKubeletContainers(true)
if err != nil {
return err
}
sandboxes, err := cgc.manager.getKubeletSandboxes(true)
if err != nil {
return err
}
evictSandboxes := make([]string, 0)
newestGCTime := time.Now().Add(-minAge)
for _, sandbox := range sandboxes {
// Prune out ready sandboxes.
if sandbox.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}
// Prune out sandboxes that still have containers.
found := false
sandboxID := sandbox.GetId()
for _, container := range containers {
if container.GetPodSandboxId() == sandboxID {
found = true
break
}
}
if found {
continue
}
// Only garbage collect sandboxes older than sandboxMinGCAge.
createdAt := time.Unix(0, sandbox.GetCreatedAt())
if createdAt.After(newestGCTime) {
continue
}
evictSandboxes = append(evictSandboxes, sandboxID)
}
for _, sandbox := range evictSandboxes {
cgc.removeSandbox(sandbox)
}
return nil
}
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
osInterface := cgc.manager.osInterface
if allSourcesReady {
// Only remove pod logs directories when all sources are ready.
dirs, err := osInterface.ReadDir(podLogsRootDirectory)
if err != nil {
return fmt.Errorf("failed to read podLogsRootDirectory %q: %v", podLogsRootDirectory, err)
}
for _, dir := range dirs {
name := dir.Name()
podUID := types.UID(name)
if !cgc.isPodDeleted(podUID) {
continue
}
err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name))
if err != nil {
glog.Errorf("Failed to remove pod logs directory %q: %v", name, err)
}
}
}
// Remove dead container log symlinks.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
logSymlinks, _ := osInterface.Glob(filepath.Join(legacyContainerLogsDir, fmt.Sprintf("*.%s", legacyLogSuffix)))
for _, logSymlink := range logSymlinks {
if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
err := osInterface.Remove(logSymlink)
if err != nil {
glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
}
}
}
return nil
}
// GarbageCollect removes dead containers using the specified container gc policy.
// Note that gc policy is not applied to sandboxes. Sandboxes are only removed when they are
// not ready and contain no containers.
//
// GarbageCollect consists of the following steps:
// * gets evictable containers which are not active and created more than gcPolicy.MinAge ago.
// * removes oldest dead containers for each pod by enforcing gcPolicy.MaxPerPodContainer.
// * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
// * gets evictable sandboxes which are not ready and contain no containers.
// * removes evictable sandboxes.
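//
// For illustration, with the policy used by the GC tests in this package
// (MinAge: 1h, MaxPerPodContainer: 2, MaxContainers: 6), at most two dead
// containers are kept per (pod UID, container name) pair and at most six in
// total, and nothing newer than one hour is removed.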
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
// Remove evictable containers
if err := cgc.evictContainers(gcPolicy, allSourcesReady); err != nil {
return err
}
// Remove sandboxes with zero containers
if err := cgc.evictSandboxes(sandboxMinGCAge); err != nil {
return err
}
// Remove pod sandbox log directory
return cgc.evictPodLogsDirectories(allSourcesReady)
}


@@ -0,0 +1,334 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
func TestSandboxGC(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pods := []*v1.Pod{
makeTestPod("foo1", "new", "1234", []v1.Container{
makeTestContainer("bar1", "busybox"),
makeTestContainer("bar2", "busybox"),
}),
makeTestPod("foo2", "new", "5678", []v1.Container{
makeTestContainer("bar3", "busybox"),
}),
}
for c, test := range []struct {
description string // description of the test case
sandboxes []sandboxTemplate // templates of sandboxes
containers []containerTemplate // templates of containers
minAge time.Duration // sandboxMinGCAge
remain []int // template indexes of remaining sandboxes
}{
{
description: "sandbox with no containers should be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{},
remain: []int{},
},
{
description: "running sandbox should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_READY},
},
containers: []containerTemplate{},
remain: []int{0},
},
{
description: "sandbox with containers should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{
{pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeapi.ContainerState_CONTAINER_EXITED},
},
remain: []int{0},
},
{
description: "sandbox within min age should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], createdAt: time.Now().UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], createdAt: time.Now().Add(-2 * time.Hour).UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{},
minAge: time.Hour, // assume the test won't take an hour
remain: []int{0},
},
{
description: "multiple sandboxes should be handled properly.",
sandboxes: []sandboxTemplate{
// running sandbox.
{pod: pods[0], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_READY},
// exited sandbox with containers.
{pod: pods[1], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
// exited sandbox without containers.
{pod: pods[1], attempt: 0, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{
{pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
},
remain: []int{0, 1},
},
} {
t.Logf("TestCase #%d: %+v", c, test)
fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes)
fakeContainers := makeFakeContainers(t, m, test.containers)
fakeRuntime.SetFakeSandboxes(fakeSandboxes)
fakeRuntime.SetFakeContainers(fakeContainers)
err := m.containerGC.evictSandboxes(test.minAge)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListPodSandbox(nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].GetId())
assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, status)
}
}
}
func TestContainerGC(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
fakePodGetter := m.containerGC.podGetter.(*fakePodGetter)
makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeapi.ContainerState) containerTemplate {
container := makeTestContainer(containerName, "test-image")
pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container})
if podName != "deleted" {
// initialize the pod getter, explicitly exclude deleted pod
fakePodGetter.pods[pod.UID] = pod
}
return containerTemplate{
pod: pod,
container: &container,
attempt: attempt,
createdAt: createdAt,
state: state,
}
}
defaultGCPolicy := kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}
for c, test := range []struct {
description string // description of the test case
containers []containerTemplate // templates of containers
policy *kubecontainer.ContainerGCPolicy // container gc policy
remain []int // template indexes of remaining containers
}{
{
description: "all containers should be removed when max container limit is 0",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
remain: []int{},
},
{
description: "max containers should be complied when no max per pod container limit is set",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 4, 4, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 3, 3, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
remain: []int{0, 1, 2, 3},
},
{
description: "no containers should be removed if both max container and per pod container limits are not set",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
remain: []int{0, 1, 2},
},
{
description: "recently started containers should not be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 2},
},
{
description: "oldest containers should be removed when per pod container limit exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1},
},
{
description: "running containers should not be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_RUNNING),
},
remain: []int{0, 1, 2},
},
{
description: "no containers should be removed when limits are not exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1},
},
{
description: "max container count should apply per (UID, container) pair",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 3, 4, 6, 7},
},
{
description: "max limit should apply and try to keep from every pod",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 2, 4, 6, 8},
},
{
description: "oldest pods should be removed if limit exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo5", "bar5", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo6", "bar6", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo7", "bar7", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 2, 4, 6, 8, 9},
},
{
description: "containers for deleted pods should be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
// deleted pods still respect MinAge.
makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 2},
},
} {
t.Logf("TestCase #%d: %+v", c, test)
fakeContainers := makeFakeContainers(t, m, test.containers)
fakeRuntime.SetFakeContainers(fakeContainers)
if test.policy == nil {
test.policy = &defaultGCPolicy
}
err := m.containerGC.evictContainers(*test.policy, true)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListContainers(nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].GetId())
assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, status)
}
}
}
// Notice that legacy container symlink is not tested since it may be deprecated soon.
func TestPodLogDirectoryGC(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
fakeOS := m.osInterface.(*containertest.FakeOS)
fakePodGetter := m.containerGC.podGetter.(*fakePodGetter)
// pod log directories without corresponding pods should be removed.
fakePodGetter.pods["123"] = makeTestPod("foo1", "new", "123", nil)
fakePodGetter.pods["456"] = makeTestPod("foo2", "new", "456", nil)
files := []string{"123", "456", "789", "012"}
removed := []string{filepath.Join(podLogsRootDirectory, "789"), filepath.Join(podLogsRootDirectory, "012")}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
fakeOS.ReadDirFn = func(string) ([]os.FileInfo, error) {
var fileInfos []os.FileInfo
for _, file := range files {
mockFI := containertest.NewMockFileInfo(ctrl)
mockFI.EXPECT().Name().Return(file)
fileInfos = append(fileInfos, mockFI)
}
return fileInfos, nil
}
// allSourcesReady == true, pod log directories without corresponding pod should be removed.
err = m.containerGC.evictPodLogsDirectories(true)
assert.NoError(t, err)
assert.Equal(t, removed, fakeOS.Removes)
// allSourcesReady == false, pod log directories should not be removed.
fakeOS.Removes = []string{}
err = m.containerGC.evictPodLogsDirectories(false)
assert.NoError(t, err)
assert.Empty(t, fakeOS.Removes)
}


@@ -0,0 +1,139 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/parsers"
)
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret) error {
img := image.Image
repoToPull, _, _, err := parsers.ParseImageName(img)
if err != nil {
return err
}
keyring, err := credentialprovider.MakeDockerKeyring(pullSecrets, m.keyring)
if err != nil {
return err
}
imgSpec := &runtimeapi.ImageSpec{Image: &img}
creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials {
glog.V(3).Infof("Pulling image %q without credentials", img)
err = m.imageService.PullImage(imgSpec, nil)
if err != nil {
glog.Errorf("Pull image %q failed: %v", img, err)
return err
}
return nil
}
var pullErrs []error
for _, currentCreds := range creds {
authConfig := credentialprovider.LazyProvide(currentCreds)
auth := &runtimeapi.AuthConfig{
Username: &authConfig.Username,
Password: &authConfig.Password,
Auth: &authConfig.Auth,
ServerAddress: &authConfig.ServerAddress,
IdentityToken: &authConfig.IdentityToken,
RegistryToken: &authConfig.RegistryToken,
}
err = m.imageService.PullImage(imgSpec, auth)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, err)
}
return utilerrors.NewAggregate(pullErrs)
}
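// Illustrative call (a sketch, as exercised by TestPullImage in this package):
// with no matching pull secrets, the anonymous pull path above is taken.
//
//	err := m.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil)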
// IsImagePresent checks whether the container image is already in the local storage.
func (m *kubeGenericRuntimeManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil {
glog.Errorf("ImageStatus for image %q failed: %v", image, err)
return false, err
}
return status != nil, nil
}
// ListImages gets all images currently on the machine.
func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) {
var images []kubecontainer.Image
allImages, err := m.imageService.ListImages(nil)
if err != nil {
glog.Errorf("ListImages failed: %v", err)
return nil, err
}
for _, img := range allImages {
images = append(images, kubecontainer.Image{
ID: img.GetId(),
Size: int64(img.GetSize_()),
RepoTags: img.RepoTags,
RepoDigests: img.RepoDigests,
})
}
return images, nil
}
// RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil {
glog.Errorf("Remove image %q failed: %v", image.Image, err)
return err
}
return nil
}
// ImageStats returns the statistics of the image.
// Notice that the current logic doesn't really work for images which share layers (e.g. docker images);
// this is a known issue, and we'll address it by getting imagefs stats directly from CRI.
// TODO: Get imagefs stats directly from CRI.
func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) {
allImages, err := m.imageService.ListImages(nil)
if err != nil {
glog.Errorf("ListImages failed: %v", err)
return nil, err
}
stats := &kubecontainer.ImageStats{}
for _, img := range allImages {
stats.TotalStorageBytes += img.GetSize_()
}
return stats, nil
}


@@ -0,0 +1,95 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"testing"
"github.com/stretchr/testify/assert"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestPullImage(t *testing.T) {
_, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)
err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil)
assert.NoError(t, err)
images, err := fakeManager.ListImages()
assert.NoError(t, err)
assert.Equal(t, 1, len(images))
assert.Equal(t, images[0].RepoTags, []string{"busybox"})
}
func TestListImages(t *testing.T) {
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)
images := []string{"1111", "2222", "3333"}
expected := sets.NewString(images...)
fakeImageService.SetFakeImages(images)
actualImages, err := fakeManager.ListImages()
assert.NoError(t, err)
actual := sets.NewString()
for _, i := range actualImages {
actual.Insert(i.ID)
}
assert.Equal(t, expected.List(), actual.List())
}
func TestIsImagePresent(t *testing.T) {
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)
image := "busybox"
fakeImageService.SetFakeImages([]string{image})
present, err := fakeManager.IsImagePresent(kubecontainer.ImageSpec{Image: image})
assert.NoError(t, err)
assert.Equal(t, true, present)
}
func TestRemoveImage(t *testing.T) {
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)
err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))
err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err)
assert.Equal(t, 0, len(fakeImageService.Images))
}
func TestImageStats(t *testing.T) {
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)
const imageSize = 64
fakeImageService.SetFakeImageSize(imageSize)
images := []string{"1111", "2222", "3333"}
fakeImageService.SetFakeImages(images)
actualStats, err := fakeManager.ImageStats()
assert.NoError(t, err)
expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))}
assert.Equal(t, expectedStats, actualStats)
}


@ -0,0 +1,386 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"time"
"github.com/docker/docker/pkg/jsonlog"
"github.com/fsnotify/fsnotify"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
)
// Notice that the current kuberuntime logs implementation doesn't handle
// log rotation.
// * It will not retrieve logs from rotated log files.
// * If log rotation happens when following the log:
// * If the rotation is using create mode, we'll still follow the old file.
// * If the rotation is using copytruncate, we'll be reading at the original position and get nothing.
// TODO(random-liu): Support log rotation.
// streamType is the type of the stream.
type streamType string
const (
stderrType streamType = "stderr"
stdoutType streamType = "stdout"
// timeFormat is the time format used in the log.
timeFormat = time.RFC3339Nano
// blockSize is the block size used in tail.
blockSize = 1024
)
var (
// eol is the end-of-line sign in the log.
eol = []byte{'\n'}
// delimiter is the delimiter for timestamp and stream type in a log line.
delimiter = []byte{' '}
)
// logMessage is the internal log type.
type logMessage struct {
timestamp time.Time
stream streamType
log []byte
}
// reset resets the log to nil.
func (l *logMessage) reset() {
l.timestamp = time.Time{}
l.stream = ""
l.log = nil
}
// logOptions is the internal type of all log options.
type logOptions struct {
tail int64
bytes int64
since time.Time
follow bool
timestamp bool
}
// newLogOptions converts v1.PodLogOptions to the internal logOptions.
func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions {
opts := &logOptions{
tail: -1, // -1 by default which means read all logs.
bytes: -1, // -1 by default which means read all logs.
follow: apiOpts.Follow,
timestamp: apiOpts.Timestamps,
}
if apiOpts.TailLines != nil {
opts.tail = *apiOpts.TailLines
}
if apiOpts.LimitBytes != nil {
opts.bytes = *apiOpts.LimitBytes
}
if apiOpts.SinceSeconds != nil {
opts.since = now.Add(-time.Duration(*apiOpts.SinceSeconds) * time.Second)
}
if apiOpts.SinceTime != nil && apiOpts.SinceTime.After(opts.since) {
opts.since = apiOpts.SinceTime.Time
}
return opts
}
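// logOptionsExample is an illustrative sketch and not part of the upstream
// source: it shows how a v1.PodLogOptions with TailLines and SinceSeconds maps
// onto the internal logOptions consumed by ReadLogs.
func logOptionsExample() *logOptions {
	tailLines := int64(100)
	sinceSeconds := int64(3600)
	apiOpts := &v1.PodLogOptions{TailLines: &tailLines, SinceSeconds: &sinceSeconds, Timestamps: true}
	// The resulting options tail the last 100 lines, skip entries older than
	// one hour, and prefix each line with its timestamp.
	return newLogOptions(apiOpts, time.Now())
}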
// ReadLogs reads the container log and redirects it into stdout and stderr.
func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open log file %q: %v", path, err)
}
defer f.Close()
// Convert v1.PodLogOptions into internal log options.
opts := newLogOptions(apiOpts, time.Now())
// Search start point based on tail line.
start, err := tail(f, opts.tail)
if err != nil {
return fmt.Errorf("failed to tail %d lines of log file %q: %v", opts.tail, path, err)
}
if _, err := f.Seek(start, os.SEEK_SET); err != nil {
return fmt.Errorf("failed to seek %d in log file %q: %v", start, path, err)
}
// Start parsing the logs.
r := bufio.NewReader(f)
// Do not create watcher here because it is not needed if `Follow` is false.
var watcher *fsnotify.Watcher
var parse parseFunc
writer := newLogWriter(stdout, stderr, opts)
msg := &logMessage{}
for {
l, err := r.ReadBytes(eol[0])
if err != nil {
if err != io.EOF { // This is a real error
return fmt.Errorf("failed to read log file %q: %v", path, err)
}
if !opts.follow {
// Return directly when we have read to the end and are not following.
if len(l) > 0 {
glog.Warningf("Incomplete line in log file %q: %q", path, l)
}
glog.V(2).Infof("Finish parsing log file %q", path)
return nil
}
// Reset seek so that if this is an incomplete line,
// it will be read again.
if _, err := f.Seek(-int64(len(l)), os.SEEK_CUR); err != nil {
return fmt.Errorf("failed to reset seek in log file %q: %v", path, err)
}
if watcher == nil {
// Initialize the watcher if it has not been initialized yet.
if watcher, err = fsnotify.NewWatcher(); err != nil {
return fmt.Errorf("failed to create fsnotify watcher: %v", err)
}
defer watcher.Close()
if err := watcher.Add(f.Name()); err != nil {
return fmt.Errorf("failed to watch file %q: %v", f.Name(), err)
}
}
// Wait until the next log change.
if err := waitLogs(watcher); err != nil {
return fmt.Errorf("failed to wait logs for log file %q: %v", path, err)
}
continue
}
if parse == nil {
// Initialize the log parsing function.
parse, err = getParseFunc(l)
if err != nil {
return fmt.Errorf("failed to get parse function: %v", err)
}
}
// Parse the log line.
msg.reset()
if err := parse(l, msg); err != nil {
glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l)
continue
}
// Write the log line into the stream.
if err := writer.write(msg); err != nil {
if err == errMaximumWrite {
glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes)
return nil
}
glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg)
}
}
}
// parseFunc is a function parsing one log line to the internal log type.
// Notice that the caller must make sure logMessage is not nil.
type parseFunc func([]byte, *logMessage) error
var parseFuncs = []parseFunc{
parseCRILog, // CRI log format parse function
parseDockerJSONLog, // Docker JSON log format parse function
}
// parseCRILog parses logs in CRI log format. CRI Log format example:
// 2016-10-06T00:17:09.669794202Z stdout log content 1
// 2016-10-06T00:17:09.669794203Z stderr log content 2
func parseCRILog(log []byte, msg *logMessage) error {
var err error
// Parse timestamp
idx := bytes.Index(log, delimiter)
if idx < 0 {
return fmt.Errorf("timestamp is not found")
}
msg.timestamp, err = time.Parse(timeFormat, string(log[:idx]))
if err != nil {
return fmt.Errorf("unexpected timestamp format %q: %v", timeFormat, err)
}
// Parse stream type
log = log[idx+1:]
idx = bytes.Index(log, delimiter)
if idx < 0 {
return fmt.Errorf("stream type is not found")
}
msg.stream = streamType(log[:idx])
if msg.stream != stdoutType && msg.stream != stderrType {
return fmt.Errorf("unexpected stream type %q", msg.stream)
}
// Get log content
msg.log = log[idx+1:]
return nil
}
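// parseCRILogExample is an illustrative sketch and not part of the upstream
// source: it parses one CRI-format log line into the internal logMessage type.
func parseCRILogExample() (*logMessage, error) {
	msg := &logMessage{}
	line := []byte("2016-10-06T00:17:09.669794202Z stdout log content 1\n")
	if err := parseCRILog(line, msg); err != nil {
		return nil, err
	}
	// msg.timestamp, msg.stream and msg.log now hold the parsed fields.
	return msg, nil
}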
// dockerJSONLog is the JSON log buffer used in parseDockerJSONLog.
var dockerJSONLog = &jsonlog.JSONLog{}
// parseDockerJSONLog parses logs in Docker JSON log format. Docker JSON log format
// example:
// {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"}
// {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"}
func parseDockerJSONLog(log []byte, msg *logMessage) error {
dockerJSONLog.Reset()
l := dockerJSONLog
// TODO: JSON decoding is fairly expensive, we should evaluate this.
if err := json.Unmarshal(log, l); err != nil {
return fmt.Errorf("failed with %v to unmarshal log %q", err, l)
}
msg.timestamp = l.Created
msg.stream = streamType(l.Stream)
msg.log = []byte(l.Log)
return nil
}
// getParseFunc returns the proper parse function based on the sample log line passed in.
func getParseFunc(log []byte) (parseFunc, error) {
for _, p := range parseFuncs {
if err := p(log, &logMessage{}); err == nil {
return p, nil
}
}
return nil, fmt.Errorf("unsupported log format: %q", log)
}
// waitLogs waits for the next log write.
func waitLogs(w *fsnotify.Watcher) error {
errRetry := 5
for {
select {
case e := <-w.Events:
switch e.Op {
case fsnotify.Write:
return nil
default:
glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
}
case err := <-w.Errors:
glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
if errRetry == 0 {
return err
}
errRetry--
}
}
}
// logWriter controls the writing into the stream based on the log options.
type logWriter struct {
stdout io.Writer
stderr io.Writer
opts *logOptions
remain int64
}
// errMaximumWrite is returned when all bytes have been written.
var errMaximumWrite = errors.New("maximum write")
func newLogWriter(stdout io.Writer, stderr io.Writer, opts *logOptions) *logWriter {
w := &logWriter{
stdout: stdout,
stderr: stderr,
opts: opts,
remain: math.MaxInt64, // initialize it as infinity
}
if opts.bytes >= 0 {
w.remain = opts.bytes
}
return w
}
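// logWriterExample is an illustrative sketch and not part of the upstream
// source: it writes a single stdout message through a byte-limited logWriter,
// the same path ReadLogs takes when LimitBytes is set in the log options.
func logWriterExample() (string, error) {
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	w := newLogWriter(stdout, stderr, &logOptions{tail: -1, bytes: 16})
	msg := &logMessage{timestamp: time.Now(), stream: stdoutType, log: []byte("hello\n")}
	err := w.write(msg)
	return stdout.String(), err
}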
// write writes the log message into the proper stream (stdout or stderr).
func (w *logWriter) write(msg *logMessage) error {
if msg.timestamp.Before(w.opts.since) {
// Skip the line because it's older than since
return nil
}
line := msg.log
if w.opts.timestamp {
prefix := append([]byte(msg.timestamp.Format(timeFormat)), delimiter[0])
line = append(prefix, line...)
}
// If the line is longer than the remaining bytes, cut it.
if int64(len(line)) > w.remain {
line = line[:w.remain]
}
// Get the proper stream to write to.
var stream io.Writer
switch msg.stream {
case stdoutType:
stream = w.stdout
case stderrType:
stream = w.stderr
default:
return fmt.Errorf("unexpected stream type %q", msg.stream)
}
n, err := stream.Write(line)
w.remain -= int64(n)
if err != nil {
return err
}
// If there are no more bytes left, return errMaximumWrite
if w.remain <= 0 {
return errMaximumWrite
}
return nil
}
// tail returns the start of the last nth line.
// * If n < 0, return the beginning of the file.
// * If n >= 0, return the beginning of the last nth line.
// Notice that if the last line is incomplete (no end-of-line), it will not be counted
// as one line.
func tail(f io.ReadSeeker, n int64) (int64, error) {
if n < 0 {
return 0, nil
}
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
return 0, err
}
var left, cnt int64
buf := make([]byte, blockSize)
for right := size; right > 0 && cnt <= n; right -= blockSize {
left = right - blockSize
if left < 0 {
left = 0
buf = make([]byte, right)
}
if _, err := f.Seek(left, os.SEEK_SET); err != nil {
return 0, err
}
if _, err := f.Read(buf); err != nil {
return 0, err
}
cnt += int64(bytes.Count(buf, eol))
}
for ; cnt > n; cnt-- {
idx := bytes.Index(buf, eol) + 1
buf = buf[idx:]
left += int64(idx)
}
return left, nil
}
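// tailExample is an illustrative sketch and not part of the upstream source:
// it finds the byte offset of the start of the last two complete lines of an
// in-memory log, mirroring what ReadLogs does before seeking when TailLines
// is set.
func tailExample() (int64, error) {
	data := []byte("line1\nline2\nline3\n")
	// Expected offset is 6: the start of "line2\n".
	return tail(bytes.NewReader(data), 2)
}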


@ -0,0 +1,269 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"bytes"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
func TestLogOptions(t *testing.T) {
var (
line = int64(8)
bytes = int64(64)
timestamp = metav1.Now()
sinceseconds = int64(10)
)
for c, test := range []struct {
apiOpts *v1.PodLogOptions
expect *logOptions
}{
{ // empty options
apiOpts: &v1.PodLogOptions{},
expect: &logOptions{tail: -1, bytes: -1},
},
{ // test tail lines
apiOpts: &v1.PodLogOptions{TailLines: &line},
expect: &logOptions{tail: line, bytes: -1},
},
{ // test limit bytes
apiOpts: &v1.PodLogOptions{LimitBytes: &bytes},
expect: &logOptions{tail: -1, bytes: bytes},
},
{ // test since timestamp
apiOpts: &v1.PodLogOptions{SinceTime: &timestamp},
expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Time},
},
{ // test since seconds
apiOpts: &v1.PodLogOptions{SinceSeconds: &sinceseconds},
expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Add(-10 * time.Second)},
},
} {
t.Logf("TestCase #%d: %+v", c, test)
opts := newLogOptions(test.apiOpts, timestamp.Time)
assert.Equal(t, test.expect, opts)
}
}
func TestParseLog(t *testing.T) {
timestamp, err := time.Parse(timeFormat, "2016-10-20T18:39:20.57606443Z")
assert.NoError(t, err)
msg := &logMessage{}
for c, test := range []struct {
line string
msg *logMessage
err bool
}{
{ // Docker log format stdout
line: `{"log":"docker stdout test log","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"}` + "\n",
msg: &logMessage{
timestamp: timestamp,
stream: stdoutType,
log: []byte("docker stdout test log"),
},
},
{ // Docker log format stderr
line: `{"log":"docker stderr test log","stream":"stderr","time":"2016-10-20T18:39:20.57606443Z"}` + "\n",
msg: &logMessage{
timestamp: timestamp,
stream: stderrType,
log: []byte("docker stderr test log"),
},
},
{ // CRI log format stdout
line: "2016-10-20T18:39:20.57606443Z stdout cri stdout test log\n",
msg: &logMessage{
timestamp: timestamp,
stream: stdoutType,
log: []byte("cri stdout test log\n"),
},
},
{ // CRI log format stderr
line: "2016-10-20T18:39:20.57606443Z stderr cri stderr test log\n",
msg: &logMessage{
timestamp: timestamp,
stream: stderrType,
log: []byte("cri stderr test log\n"),
},
},
{ // Unsupported Log format
line: "unsupported log format test log\n",
msg: &logMessage{},
err: true,
},
} {
t.Logf("TestCase #%d: %+v", c, test)
parse, err := getParseFunc([]byte(test.line))
if test.err {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
err = parse([]byte(test.line), msg)
assert.NoError(t, err)
assert.Equal(t, test.msg, msg)
}
}
func TestWriteLogs(t *testing.T) {
timestamp := time.Unix(1234, 4321)
log := "abcdefg\n"
for c, test := range []struct {
stream streamType
since time.Time
timestamp bool
expectStdout string
expectStderr string
}{
{ // stderr log
stream: stderrType,
expectStderr: log,
},
{ // stdout log
stream: stdoutType,
expectStdout: log,
},
{ // since is after timestamp
stream: stdoutType,
since: timestamp.Add(1 * time.Second),
},
{ // timestamp enabled
stream: stderrType,
timestamp: true,
expectStderr: timestamp.Format(timeFormat) + " " + log,
},
} {
t.Logf("TestCase #%d: %+v", c, test)
msg := &logMessage{
timestamp: timestamp,
stream: test.stream,
log: []byte(log),
}
stdoutBuf := bytes.NewBuffer(nil)
stderrBuf := bytes.NewBuffer(nil)
w := newLogWriter(stdoutBuf, stderrBuf, &logOptions{since: test.since, timestamp: test.timestamp, bytes: -1})
err := w.write(msg)
assert.NoError(t, err)
assert.Equal(t, test.expectStdout, stdoutBuf.String())
assert.Equal(t, test.expectStderr, stderrBuf.String())
}
}
func TestWriteLogsWithBytesLimit(t *testing.T) {
timestamp := time.Unix(1234, 4321)
timestampStr := timestamp.Format(timeFormat)
log := "abcdefg\n"
for c, test := range []struct {
stdoutLines int
stderrLines int
bytes int
timestamp bool
expectStdout string
expectStderr string
}{
{ // limit bytes less than one line
stdoutLines: 3,
bytes: 3,
expectStdout: "abc",
},
{ // limit bytes across lines
stdoutLines: 3,
bytes: len(log) + 3,
expectStdout: "abcdefg\nabc",
},
{ // limit bytes more than all lines
stdoutLines: 3,
bytes: 3 * len(log),
expectStdout: "abcdefg\nabcdefg\nabcdefg\n",
},
{ // limit bytes for stderr
stderrLines: 3,
bytes: len(log) + 3,
expectStderr: "abcdefg\nabc",
},
{ // limit bytes for both stdout and stderr, stdout first.
stdoutLines: 1,
stderrLines: 2,
bytes: len(log) + 3,
expectStdout: "abcdefg\n",
expectStderr: "abc",
},
{ // limit bytes with timestamp
stdoutLines: 3,
timestamp: true,
bytes: len(timestampStr) + 1 + len(log) + 2,
expectStdout: timestampStr + " " + log + timestampStr[:2],
},
} {
t.Logf("TestCase #%d: %+v", c, test)
msg := &logMessage{
timestamp: timestamp,
log: []byte(log),
}
stdoutBuf := bytes.NewBuffer(nil)
stderrBuf := bytes.NewBuffer(nil)
w := newLogWriter(stdoutBuf, stderrBuf, &logOptions{timestamp: test.timestamp, bytes: int64(test.bytes)})
for i := 0; i < test.stdoutLines; i++ {
msg.stream = stdoutType
if err := w.write(msg); err != nil {
assert.EqualError(t, err, errMaximumWrite.Error())
}
}
for i := 0; i < test.stderrLines; i++ {
msg.stream = stderrType
if err := w.write(msg); err != nil {
assert.EqualError(t, err, errMaximumWrite.Error())
}
}
assert.Equal(t, test.expectStdout, stdoutBuf.String())
assert.Equal(t, test.expectStderr, stderrBuf.String())
}
}
func TestTail(t *testing.T) {
line := strings.Repeat("a", blockSize)
testBytes := []byte(line + "\n" +
line + "\n" +
line + "\n" +
line + "\n" +
line[blockSize/2:]) // incomplete line
for c, test := range []struct {
n int64
start int64
}{
{n: -1, start: 0},
{n: 0, start: int64(len(line)+1) * 4},
{n: 1, start: int64(len(line)+1) * 3},
{n: 9999, start: 0},
} {
t.Logf("TestCase #%d: %+v", c, test)
r := bytes.NewReader(testBytes)
s, err := tail(r, test.n)
assert.NoError(t, err)
assert.Equal(t, s, test.start)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,263 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"net"
"net/url"
"sort"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
kubetypes "k8s.io/kubernetes/pkg/types"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
glog.Error(message)
return "", message, err
}
// Create pod logs directory
err = m.osInterface.MkdirAll(podSandboxConfig.GetLogDirectory(), 0755)
if err != nil {
message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
glog.Errorf(message)
return "", message, err
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig)
if err != nil {
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
glog.Error(message)
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: &pod.Name,
Namespace: &pod.Namespace,
Uid: &podUID,
Attempt: &attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
if !kubecontainer.IsHostNetworkPod(pod) {
dnsServers, dnsSearches, err := m.runtimeHelper.GetClusterDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = &runtimeapi.DNSConfig{
Servers: dnsServers,
Searches: dnsSearches,
Options: defaultDNSOptions,
}
// TODO: Add domain support in new runtime interface
hostname, _, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = &hostname
}
logDir := buildPodLogsDirectory(pod.UID)
podSandboxConfig.LogDirectory = &logDir
cgroupParent := ""
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
// TODO: use a separate interface to only generate portmappings
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, &c, "")
if err != nil {
return nil, err
}
for idx := range opts.PortMappings {
port := opts.PortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: &port.HostIP,
HostPort: &hostPort,
ContainerPort: &containerPort,
Protocol: &protocol,
})
}
// TODO: refactor kubelet to get cgroup parent for pod instead of containers
cgroupParent = opts.CgroupParent
}
podSandboxConfig.Linux = m.generatePodSandboxLinuxConfig(pod, cgroupParent)
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeapi.LinuxPodSandboxConfig {
if pod.Spec.SecurityContext == nil && cgroupParent == "" {
return nil
}
lc := &runtimeapi.LinuxPodSandboxConfig{}
if cgroupParent != "" {
lc.CgroupParent = &cgroupParent
}
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
lc.SecurityContext = &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &pod.Spec.HostNetwork,
HostIpc: &pod.Spec.HostIPC,
HostPid: &pod.Spec.HostPID,
},
RunAsUser: sc.RunAsUser,
}
if sc.FSGroup != nil {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, *sc.FSGroup)
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, sc.SupplementalGroups...)
}
if sc.SELinuxOptions != nil {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: &sc.SELinuxOptions.User,
Role: &sc.SELinuxOptions.Role,
Type: &sc.SELinuxOptions.Type,
Level: &sc.SELinuxOptions.Level,
}
}
}
return lc
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeapi.PodSandboxFilter
if !all {
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
filter = &runtimeapi.PodSandboxFilter{
State: &readyState,
}
}
resp, err := m.runtimeService.ListPodSandbox(filter)
if err != nil {
glog.Errorf("ListPodSandbox failed: %v", err)
return nil, err
}
result := []*runtimeapi.PodSandbox{}
for _, s := range resp {
if !isManagedByKubelet(s.Labels) {
glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
s.Metadata.GetName(), s.Metadata.GetNamespace()))
continue
}
result = append(result, s)
}
return result, nil
}
// determinePodSandboxIP determines the IP address of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string {
if podSandbox.Network == nil {
glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
return ""
}
ip := podSandbox.Network.GetIp()
if net.ParseIP(ip) == nil {
glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
return ""
}
return ip
}
// getSandboxIDByPodUID gets the sandbox IDs by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to the same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeapi.PodSandboxFilter{
State: state,
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
}
sandboxes, err := m.runtimeService.ListPodSandbox(filter)
if err != nil {
glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
return nil, err
}
if len(sandboxes) == 0 {
return nil, nil
}
// Sort with newest first.
sandboxIDs := make([]string, len(sandboxes))
sort.Sort(podSandboxByCreated(sandboxes))
for i, s := range sandboxes {
sandboxIDs[i] = s.GetId()
}
return sandboxIDs, nil
}
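// readySandboxIDsExample is an illustrative sketch and not part of the
// upstream source: it lists the IDs of the ready sandboxes for a pod UID,
// newest first; passing a nil state, as GetPortForward below does, would
// return all of the pod's sandboxes instead.
func readySandboxIDsExample(m *kubeGenericRuntimeManager, podUID kubetypes.UID) ([]string, error) {
	readyState := runtimeapi.PodSandboxState_SANDBOX_READY
	return m.getSandboxIDByPodUID(podUID, &readyState)
}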
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string, podUID kubetypes.UID) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(podUID, nil)
if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
}
if len(sandboxIDs) == 0 {
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
}
// TODO: Port is unused for now, but we may need it in the future.
req := &runtimeapi.PortForwardRequest{
PodSandboxId: &sandboxIDs[0],
}
resp, err := m.runtimeService.PortForward(req)
if err != nil {
return nil, err
}
return url.Parse(resp.GetUrl())
}


@ -0,0 +1,64 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
// TestCreatePodSandbox tests creating a sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
fakeOS := m.osInterface.(*containertest.FakeOS)
fakeOS.MkdirAllFn = func(path string, perm os.FileMode) error {
// Check pod logs root directory is created.
assert.Equal(t, filepath.Join(podLogsRootDirectory, "12345678"), path)
assert.Equal(t, os.FileMode(0755), perm)
return nil
}
id, _, err := m.createPodSandbox(pod, 1)
assert.NoError(t, err)
fakeRuntime.AssertCalls([]string{"RunPodSandbox"})
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: &id})
assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration
}


@ -0,0 +1,289 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"encoding/json"
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
kubetypes "k8s.io/kubernetes/pkg/types"
)
const (
// TODO: move those label definitions to kubelet/types/labels.go
// TODO: change those label names to follow kubernetes's format
podDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
containerHashLabel = "io.kubernetes.container.hash"
containerRestartCountLabel = "io.kubernetes.container.restartCount"
containerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
containerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
containerPortsLabel = "io.kubernetes.container.ports"
// kubernetesManagedLabel is used to distinguish whether a container/sandbox is managed by kubelet or not
kubernetesManagedLabel = "io.kubernetes.managed"
)
type labeledPodSandboxInfo struct {
// Labels from v1.Pod
Labels map[string]string
PodName string
PodNamespace string
PodUID kubetypes.UID
}
type annotatedPodSandboxInfo struct {
// Annotations from v1.Pod
Annotations map[string]string
}
type labeledContainerInfo struct {
ContainerName string
PodName string
PodNamespace string
PodUID kubetypes.UID
}
type annotatedContainerInfo struct {
Hash uint64
RestartCount int
PodDeletionGracePeriod *int64
PodTerminationGracePeriod *int64
TerminationMessagePath string
PreStopHandler *v1.Handler
ContainerPorts []v1.ContainerPort
}
// newPodLabels creates pod labels from v1.Pod.
func newPodLabels(pod *v1.Pod) map[string]string {
labels := map[string]string{}
// Get labels from v1.Pod
for k, v := range pod.Labels {
labels[k] = v
}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
labels[kubernetesManagedLabel] = "true"
return labels
}
// newPodAnnotations creates pod annotations from v1.Pod.
func newPodAnnotations(pod *v1.Pod) map[string]string {
return pod.Annotations
}
// newContainerLabels creates container labels from v1.Container and v1.Pod.
func newContainerLabels(container *v1.Container, pod *v1.Pod) map[string]string {
labels := map[string]string{}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
labels[types.KubernetesContainerNameLabel] = container.Name
labels[kubernetesManagedLabel] = "true"
return labels
}
// newContainerAnnotations creates container annotations from v1.Container and v1.Pod.
func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int) map[string]string {
annotations := map[string]string{}
annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
annotations[containerRestartCountLabel] = strconv.Itoa(restartCount)
annotations[containerTerminationMessagePathLabel] = container.TerminationMessagePath
if pod.DeletionGracePeriodSeconds != nil {
annotations[podDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
}
if pod.Spec.TerminationGracePeriodSeconds != nil {
annotations[podTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
}
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
// Using JSON encoding so that the PreStop handler object is readable after being written as a label
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
if err != nil {
glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
} else {
annotations[containerPreStopHandlerLabel] = string(rawPreStop)
}
}
if len(container.Ports) > 0 {
rawContainerPorts, err := json.Marshal(container.Ports)
if err != nil {
glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
} else {
annotations[containerPortsLabel] = string(rawContainerPorts)
}
}
return annotations
}
// getPodSandboxInfoFromLabels gets labeledPodSandboxInfo from labels.
func getPodSandboxInfoFromLabels(labels map[string]string) *labeledPodSandboxInfo {
podSandboxInfo := &labeledPodSandboxInfo{
Labels: make(map[string]string),
PodName: getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
}
// Keep only the labels that come from v1.Pod
for k, v := range labels {
if k != types.KubernetesPodNameLabel && k != types.KubernetesPodNamespaceLabel && k != types.KubernetesPodUIDLabel && k != kubernetesManagedLabel {
podSandboxInfo.Labels[k] = v
}
}
return podSandboxInfo
}
// getPodSandboxInfoFromAnnotations gets annotatedPodSandboxInfo from annotations.
func getPodSandboxInfoFromAnnotations(annotations map[string]string) *annotatedPodSandboxInfo {
return &annotatedPodSandboxInfo{
Annotations: annotations,
}
}
// getContainerInfoFromLabels gets labeledContainerInfo from labels.
func getContainerInfoFromLabels(labels map[string]string) *labeledContainerInfo {
return &labeledContainerInfo{
PodName: getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
ContainerName: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
}
}
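// containerLabelRoundTripExample is an illustrative sketch and not part of the
// upstream source: it builds the CRI labels for a container and reads them
// back, showing that the pod/container identity survives the round trip.
func containerLabelRoundTripExample(pod *v1.Pod, container *v1.Container) *labeledContainerInfo {
	labels := newContainerLabels(container, pod)
	return getContainerInfoFromLabels(labels)
}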
// isManagedByKubelet returns true if the sandbox/container is managed by kubelet.
func isManagedByKubelet(labels map[string]string) bool {
if _, ok := labels[kubernetesManagedLabel]; ok {
return true
}
return false
}
// getContainerInfoFromAnnotations gets annotatedContainerInfo from annotations.
func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedContainerInfo {
var err error
containerInfo := &annotatedContainerInfo{
TerminationMessagePath: getStringValueFromLabel(annotations, containerTerminationMessagePathLabel),
}
if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err)
}
if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err)
}
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err)
}
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err)
}
preStopHandler := &v1.Handler{}
if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}
containerPorts := []v1.ContainerPort{}
if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err)
} else if found {
containerInfo.ContainerPorts = containerPorts
}
return containerInfo
}
func getStringValueFromLabel(labels map[string]string, label string) string {
if value, found := labels[label]; found {
return value
}
// Do not report an error, because there may be many old containers without this label.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Return an empty string "" for these containers; the caller will get the value in other ways.
return ""
}
func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
if strValue, found := labels[label]; found {
intValue, err := strconv.Atoi(strValue)
if err != nil {
// This really should not happen. Just set value to 0 to handle this abnormal case
return 0, err
}
return intValue, nil
}
// Do not report an error, because there may be many old containers without this label.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Just set the value to 0
return 0, nil
}
func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, error) {
if strValue, found := labels[label]; found {
intValue, err := strconv.ParseUint(strValue, 16, 64)
if err != nil {
// This really should not happen. Just set value to 0 to handle this abnormal case
return 0, err
}
return intValue, nil
}
// Do not report an error, because there may be many old containers without this label.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Just set the value to 0
return 0, nil
}
func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
if strValue, found := labels[label]; found {
int64Value, err := strconv.ParseInt(strValue, 10, 64)
if err != nil {
return nil, err
}
return &int64Value, nil
}
// If the label is not found, return pointer nil.
return nil, nil
}
// getJSONObjectFromLabel returns a bool value indicating whether an object is found.
func getJSONObjectFromLabel(labels map[string]string, label string, value interface{}) (bool, error) {
if strValue, found := labels[label]; found {
err := json.Unmarshal([]byte(strValue), value)
return found, err
}
// If the label is not found, return not found.
return false, nil
}


@ -0,0 +1,216 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/intstr"
)
func TestContainerLabels(t *testing.T) {
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
lifecycle := &v1.Lifecycle{
// Left PostStart as nil
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"action1", "action2"},
},
HTTPGet: &v1.HTTPGetAction{
Path: "path",
Host: "host",
Port: intstr.FromInt(8080),
Scheme: "scheme",
},
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromString("80"),
},
},
}
container := &v1.Container{
Name: "test_container",
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
TerminationGracePeriodSeconds: &terminationGracePeriod,
},
}
expected := &labeledContainerInfo{
PodName: pod.Name,
PodNamespace: pod.Namespace,
PodUID: pod.UID,
ContainerName: container.Name,
}
// Test whether we can get the right information from the labels
labels := newContainerLabels(container, pod)
containerInfo := getContainerInfoFromLabels(labels)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
}
func TestContainerAnnotations(t *testing.T) {
restartCount := 5
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
lifecycle := &v1.Lifecycle{
// Left PostStart as nil
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"action1", "action2"},
},
HTTPGet: &v1.HTTPGetAction{
Path: "path",
Host: "host",
Port: intstr.FromInt(8080),
Scheme: "scheme",
},
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromString("80"),
},
},
}
containerPorts := []v1.ContainerPort{
{
Name: "http",
HostPort: 80,
ContainerPort: 8080,
Protocol: v1.ProtocolTCP,
},
{
Name: "https",
HostPort: 443,
ContainerPort: 6443,
Protocol: v1.ProtocolTCP,
},
}
container := &v1.Container{
Name: "test_container",
Ports: containerPorts,
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
TerminationGracePeriodSeconds: &terminationGracePeriod,
},
}
expected := &annotatedContainerInfo{
ContainerPorts: containerPorts,
PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds,
PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
Hash: kubecontainer.HashContainer(container),
RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop,
}
// Test whether we can get the right information from the annotations
annotations := newContainerAnnotations(container, pod, restartCount)
containerInfo := getContainerInfoFromAnnotations(annotations)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
// the information retrieved from the annotations should also be nil
container.Lifecycle = nil
pod.DeletionGracePeriodSeconds = nil
pod.Spec.TerminationGracePeriodSeconds = nil
expected.PodDeletionGracePeriod = nil
expected.PodTerminationGracePeriod = nil
expected.PreStopHandler = nil
// Because the container has changed, the Hash should be updated
expected.Hash = kubecontainer.HashContainer(container)
annotations = newContainerAnnotations(container, pod, restartCount)
containerInfo = getContainerInfoFromAnnotations(annotations)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
}
func TestPodLabels(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
Labels: map[string]string{"foo": "bar"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{},
},
}
expected := &labeledPodSandboxInfo{
Labels: pod.Labels,
PodName: pod.Name,
PodNamespace: pod.Namespace,
PodUID: pod.UID,
}
// Test whether we can get the right information from the labels
labels := newPodLabels(pod)
podSandboxInfo := getPodSandboxInfoFromLabels(labels)
if !reflect.DeepEqual(podSandboxInfo, expected) {
t.Errorf("expected %v, got %v", expected, podSandboxInfo)
}
}
func TestPodAnnotations(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
Annotations: map[string]string{"foo": "bar"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{},
},
}
expected := &annotatedPodSandboxInfo{
Annotations: map[string]string{"foo": "bar"},
}
// Test whether we can get the right information from the annotations
annotations := newPodAnnotations(pod)
podSandboxInfo := getPodSandboxInfoFromAnnotations(annotations)
if !reflect.DeepEqual(podSandboxInfo, expected) {
t.Errorf("expected %v, got %v", expected, podSandboxInfo)
}
}


@ -0,0 +1,41 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
)
// This file implements the functions that are needed for backward
// compatibility. Therefore, it imports various kubernetes packages
// directly.
const (
// legacyContainerLogsDir is the legacy location of container logs. It is the same with
// kubelet.containerLogsDir.
legacyContainerLogsDir = "/var/log/containers"
// legacyLogSuffix is the legacy log suffix.
legacyLogSuffix = dockertools.LogSuffix
)
// legacyLogSymlink composes the legacy container log path. It is only used for legacy cluster
// logging support.
func legacyLogSymlink(containerID string, containerName, podName, podNamespace string) string {
return dockertools.LogSymlink(legacyContainerLogsDir, kubecontainer.BuildPodFullName(podName, podNamespace),
containerName, containerID)
}
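// legacyLogSymlinkExample is an illustrative sketch and not part of the
// upstream source: it composes the legacy /var/log/containers symlink path for
// a hypothetical container ID and pod, as consumed by cluster-level logging
// agents that still expect the dockertools layout.
func legacyLogSymlinkExample() string {
	return legacyLogSymlink("0123456789ab", "nginx", "web-0", "default")
}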


@ -0,0 +1,132 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
)
// determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container.
func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username *string) *runtimeapi.LinuxContainerSecurityContext {
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
synthesized := convertToRuntimeSecurityContext(effectiveSc)
if synthesized == nil {
synthesized = &runtimeapi.LinuxContainerSecurityContext{}
}
// set RunAsUser.
if synthesized.RunAsUser == nil {
synthesized.RunAsUser = uid
synthesized.RunAsUsername = username
}
// set namespace options and supplemental groups.
podSc := pod.Spec.SecurityContext
if podSc == nil {
return synthesized
}
synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{
HostNetwork: &pod.Spec.HostNetwork,
HostIpc: &pod.Spec.HostIPC,
HostPid: &pod.Spec.HostPID,
}
if podSc.FSGroup != nil {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, groups...)
}
if podSc.SupplementalGroups != nil {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...)
}
return synthesized
}
// verifyRunAsNonRoot verifies RunAsNonRoot.
func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid int64) error {
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil {
return nil
}
if effectiveSc.RunAsUser != nil {
if *effectiveSc.RunAsUser == 0 {
return fmt.Errorf("container's runAsUser breaks non-root policy")
}
return nil
}
if uid == 0 {
return fmt.Errorf("container has runAsNonRoot and image will run as root")
}
return nil
}
// convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext.
func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runtimeapi.LinuxContainerSecurityContext {
if securityContext == nil {
return nil
}
return &runtimeapi.LinuxContainerSecurityContext{
RunAsUser: securityContext.RunAsUser,
Privileged: securityContext.Privileged,
ReadonlyRootfs: securityContext.ReadOnlyRootFilesystem,
Capabilities: convertToRuntimeCapabilities(securityContext.Capabilities),
SelinuxOptions: convertToRuntimeSELinuxOption(securityContext.SELinuxOptions),
}
}
// convertToRuntimeSELinuxOption converts v1.SELinuxOptions to runtimeapi.SELinuxOption.
func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxOption {
if opts == nil {
return nil
}
return &runtimeapi.SELinuxOption{
User: &opts.User,
Role: &opts.Role,
Type: &opts.Type,
Level: &opts.Level,
}
}
// convertToRuntimeCapabilities converts v1.Capabilities to runtimeapi.Capability.
func convertToRuntimeCapabilities(opts *v1.Capabilities) *runtimeapi.Capability {
if opts == nil {
return nil
}
capabilities := &runtimeapi.Capability{
AddCapabilities: make([]string, len(opts.Add)),
DropCapabilities: make([]string, len(opts.Drop)),
}
for index, value := range opts.Add {
capabilities.AddCapabilities[index] = string(value)
}
for index, value := range opts.Drop {
capabilities.DropCapabilities[index] = string(value)
}
return capabilities
}
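// capabilitiesExample is an illustrative sketch and not part of the upstream
// source: it converts a typical add/drop capability pair from the API type
// into the CRI Capability message used by the runtime.
func capabilitiesExample() *runtimeapi.Capability {
	return convertToRuntimeCapabilities(&v1.Capabilities{
		Add:  []v1.Capability{"NET_ADMIN"},
		Drop: []v1.Capability{"ALL"},
	})
}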