forked from barak/tarpoon
Add glide.yaml and vendor deps
This commit is contained in:
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
60 vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD generated vendored Normal file
@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "helpers.go",
        "image_gc_manager.go",
        "image_manager.go",
        "puller.go",
        "types.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/kubelet/cadvisor:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/util/errors:go_default_library",
        "//pkg/util/flowcontrol:go_default_library",
        "//pkg/util/parsers:go_default_library",
        "//pkg/util/sets:go_default_library",
        "//pkg/util/wait:go_default_library",
        "//vendor:github.com/docker/distribution/reference",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "image_gc_manager_test.go",
        "image_manager_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/kubelet/cadvisor/testing:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/util/clock:go_default_library",
        "//pkg/util/flowcontrol:go_default_library",
        "//vendor:github.com/google/cadvisor/info/v2",
        "//vendor:github.com/stretchr/testify/assert",
        "//vendor:github.com/stretchr/testify/require",
    ],
)
18 vendor/k8s.io/kubernetes/pkg/kubelet/images/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package images is responsible for managing lifecycle of container images.
package images
50 vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go generated vendored Normal file
@@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/util/flowcontrol"
)

// throttleImagePulling wraps kubecontainer.ImageService to throttle image
// pulling based on the given QPS and burst limits. If QPS is zero, defaults
// to no throttling.
func throttleImagePulling(imageService kubecontainer.ImageService, qps float32, burst int) kubecontainer.ImageService {
    if qps == 0.0 {
        return imageService
    }
    return &throttledImageService{
        ImageService: imageService,
        limiter:      flowcontrol.NewTokenBucketRateLimiter(qps, burst),
    }
}

type throttledImageService struct {
    kubecontainer.ImageService
    limiter flowcontrol.RateLimiter
}

func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) error {
    if ts.limiter.TryAccept() {
        return ts.ImageService.PullImage(image, secrets)
    }
    return fmt.Errorf("pull QPS exceeded.")
}
347 vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go generated vendored Normal file
@@ -0,0 +1,347 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "fmt"
    "math"
    "sort"
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/kubelet/cadvisor"
    "k8s.io/kubernetes/pkg/kubelet/container"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/events"
    "k8s.io/kubernetes/pkg/util/errors"
    "k8s.io/kubernetes/pkg/util/sets"
    "k8s.io/kubernetes/pkg/util/wait"
)

// Manages lifecycle of all images.
//
// Implementation is thread-safe.
type ImageGCManager interface {
    // Applies the garbage collection policy. Errors include being unable to free
    // enough space as per the garbage collection policy.
    GarbageCollect() error

    // Start async garbage collection of images.
    Start() error

    GetImageList() ([]kubecontainer.Image, error)

    // Delete all unused images and returns the number of bytes freed. The number of bytes freed is always returned.
    DeleteUnusedImages() (int64, error)
}

// A policy for garbage collecting images. Policy defines an allowed band in
// which garbage collection will be run.
type ImageGCPolicy struct {
    // Any usage above this threshold will always trigger garbage collection.
    // This is the highest usage we will allow.
    HighThresholdPercent int

    // Any usage below this threshold will never trigger garbage collection.
    // This is the lowest threshold we will try to garbage collect to.
    LowThresholdPercent int

    // Minimum age at which an image can be garbage collected.
    MinAge time.Duration
}

type realImageGCManager struct {
    // Container runtime
    runtime container.Runtime

    // Records of images and their use.
    imageRecords     map[string]*imageRecord
    imageRecordsLock sync.Mutex

    // The image garbage collection policy in use.
    policy ImageGCPolicy

    // cAdvisor instance.
    cadvisor cadvisor.Interface

    // Recorder for Kubernetes events.
    recorder record.EventRecorder

    // Reference to this node.
    nodeRef *v1.ObjectReference

    // Track initialization
    initialized bool
}

// Information about the images we track.
type imageRecord struct {
    // Time when this image was first detected.
    firstDetected time.Time

    // Time when we last saw this image being used.
    lastUsed time.Time

    // Size of the image in bytes.
    size int64
}

func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
    // Validate policy.
    if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
        return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
    }
    if policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {
        return nil, fmt.Errorf("invalid LowThresholdPercent %d, must be in range [0-100]", policy.LowThresholdPercent)
    }
    if policy.LowThresholdPercent > policy.HighThresholdPercent {
        return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent)
    }
    im := &realImageGCManager{
        runtime:      runtime,
        policy:       policy,
        imageRecords: make(map[string]*imageRecord),
        cadvisor:     cadvisorInterface,
        recorder:     recorder,
        nodeRef:      nodeRef,
        initialized:  false,
    }

    return im, nil
}

func (im *realImageGCManager) Start() error {
    go wait.Until(func() {
        // Initial detection make detected time "unknown" in the past.
        var ts time.Time
        if im.initialized {
            ts = time.Now()
        }
        err := im.detectImages(ts)
        if err != nil {
            glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
        } else {
            im.initialized = true
        }
    }, 5*time.Minute, wait.NeverStop)

    return nil
}

// Get a list of images on this node
func (im *realImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
    images, err := im.runtime.ListImages()
    if err != nil {
        return nil, err
    }
    return images, nil
}

func (im *realImageGCManager) detectImages(detectTime time.Time) error {
    images, err := im.runtime.ListImages()
    if err != nil {
        return err
    }
    pods, err := im.runtime.GetPods(true)
    if err != nil {
        return err
    }

    // Make a set of images in use by containers.
    imagesInUse := sets.NewString()
    for _, pod := range pods {
        for _, container := range pod.Containers {
            glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID)
            imagesInUse.Insert(container.ImageID)
        }
    }

    // Add new images and record those being used.
    now := time.Now()
    currentImages := sets.NewString()
    im.imageRecordsLock.Lock()
    defer im.imageRecordsLock.Unlock()
    for _, image := range images {
        glog.V(5).Infof("Adding image ID %s to currentImages", image.ID)
        currentImages.Insert(image.ID)

        // New image, set it as detected now.
        if _, ok := im.imageRecords[image.ID]; !ok {
            glog.V(5).Infof("Image ID %s is new", image.ID)
            im.imageRecords[image.ID] = &imageRecord{
                firstDetected: detectTime,
            }
        }

        // Set last used time to now if the image is being used.
        if isImageUsed(image, imagesInUse) {
            glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now)
            im.imageRecords[image.ID].lastUsed = now
        }

        glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size)
        im.imageRecords[image.ID].size = image.Size
    }

    // Remove old images from our records.
    for image := range im.imageRecords {
        if !currentImages.Has(image) {
            glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image)
            delete(im.imageRecords, image)
        }
    }

    return nil
}

func (im *realImageGCManager) GarbageCollect() error {
    // Get disk usage on disk holding images.
    fsInfo, err := im.cadvisor.ImagesFsInfo()
    if err != nil {
        return err
    }
    capacity := int64(fsInfo.Capacity)
    available := int64(fsInfo.Available)
    if available > capacity {
        glog.Warningf("available %d is larger than capacity %d", available, capacity)
        available = capacity
    }

    // Check valid capacity.
    if capacity == 0 {
        err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
        im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
        return err
    }

    // If over the max threshold, free enough to place us at the lower threshold.
    usagePercent := 100 - int(available*100/capacity)
    if usagePercent >= im.policy.HighThresholdPercent {
        amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
        glog.Infof("[imageGCManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree)
        freed, err := im.freeSpace(amountToFree, time.Now())
        if err != nil {
            return err
        }

        if freed < amountToFree {
            err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
            im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
            return err
        }
    }

    return nil
}

func (im *realImageGCManager) DeleteUnusedImages() (int64, error) {
    return im.freeSpace(math.MaxInt64, time.Now())
}

// Tries to free bytesToFree worth of images on the disk.
//
// Returns the number of bytes free and an error if any occurred. The number of
// bytes freed is always returned.
// Note that error may be nil and the number of bytes free may be less
// than bytesToFree.
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {
    err := im.detectImages(freeTime)
    if err != nil {
        return 0, err
    }

    im.imageRecordsLock.Lock()
    defer im.imageRecordsLock.Unlock()

    // Get all images in eviction order.
    images := make([]evictionInfo, 0, len(im.imageRecords))
    for image, record := range im.imageRecords {
        images = append(images, evictionInfo{
            id:          image,
            imageRecord: *record,
        })
    }
    sort.Sort(byLastUsedAndDetected(images))

    // Delete unused images until we've freed up enough space.
    var deletionErrors []error
    spaceFreed := int64(0)
    for _, image := range images {
        glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id)
        // Images that are currently in used were given a newer lastUsed.
        if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {
            glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime)
            break
        }

        // Avoid garbage collect the image if the image is not old enough.
        // In such a case, the image may have just been pulled down, and will be used by a container right away.

        if freeTime.Sub(image.firstDetected) < im.policy.MinAge {
            glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)
            continue
        }

        // Remove image. Continue despite errors.
        glog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size)
        err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})
        if err != nil {
            deletionErrors = append(deletionErrors, err)
            continue
        }
        delete(im.imageRecords, image.id)
        spaceFreed += image.size

        if spaceFreed >= bytesToFree {
            break
        }
    }

    if len(deletionErrors) > 0 {
        return spaceFreed, fmt.Errorf("wanted to free %d, but freed %d space with errors in image deletion: %v", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))
    }
    return spaceFreed, nil
}

type evictionInfo struct {
    id string
    imageRecord
}

type byLastUsedAndDetected []evictionInfo

func (ev byLastUsedAndDetected) Len() int      { return len(ev) }
func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }
func (ev byLastUsedAndDetected) Less(i, j int) bool {
    // Sort by last used, break ties by detected.
    if ev[i].lastUsed.Equal(ev[j].lastUsed) {
        return ev[i].firstDetected.Before(ev[j].firstDetected)
    } else {
        return ev[i].lastUsed.Before(ev[j].lastUsed)
    }
}

func isImageUsed(image container.Image, imagesInUse sets.String) bool {
    // Check the image ID.
    if _, ok := imagesInUse[image.ID]; ok {
        return true
    }
    return false
}
465 vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager_test.go generated vendored Normal file
@@ -0,0 +1,465 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "fmt"
    "testing"
    "time"

    cadvisorapiv2 "github.com/google/cadvisor/info/v2"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "k8s.io/kubernetes/pkg/client/record"
    cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
    "k8s.io/kubernetes/pkg/kubelet/container"
    containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
    "k8s.io/kubernetes/pkg/util/clock"
)

var zero time.Time

func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManager, *containertest.FakeRuntime, *cadvisortest.Mock) {
    fakeRuntime := &containertest.FakeRuntime{}
    mockCadvisor := new(cadvisortest.Mock)
    return &realImageGCManager{
        runtime:      fakeRuntime,
        policy:       policy,
        imageRecords: make(map[string]*imageRecord),
        cadvisor:     mockCadvisor,
        recorder:     &record.FakeRecorder{},
    }, fakeRuntime, mockCadvisor
}

// Accessors used for thread-safe testing.
func (im *realImageGCManager) imageRecordsLen() int {
    im.imageRecordsLock.Lock()
    defer im.imageRecordsLock.Unlock()
    return len(im.imageRecords)
}
func (im *realImageGCManager) getImageRecord(name string) (*imageRecord, bool) {
    im.imageRecordsLock.Lock()
    defer im.imageRecordsLock.Unlock()
    v, ok := im.imageRecords[name]
    vCopy := *v
    return &vCopy, ok
}

// Returns the id of the image with the given ID.
func imageID(id int) string {
    return fmt.Sprintf("image-%d", id)
}

// Returns the name of the image with the given ID.
func imageName(id int) string {
    return imageID(id) + "-name"
}

// Make an image with the specified ID.
func makeImage(id int, size int64) container.Image {
    return container.Image{
        ID:   imageID(id),
        Size: size,
    }
}

// Make a container with the specified ID. It will use the image with the same ID.
func makeContainer(id int) *container.Container {
    return &container.Container{
        ID:      container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", id)},
        Image:   imageName(id),
        ImageID: imageID(id),
    }
}

func TestDetectImagesInitialDetect(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
        makeImage(2, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                {
                    ID:      container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", 1)},
                    ImageID: imageID(1),
                    // The image filed is not set to simulate a no-name image
                },
                {
                    ID:      container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", 2)},
                    Image:   imageName(2),
                    ImageID: imageID(2),
                },
            },
        }},
    }

    startTime := time.Now().Add(-time.Millisecond)
    err := manager.detectImages(zero)
    assert := assert.New(t)
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 3)
    noContainer, ok := manager.getImageRecord(imageID(0))
    require.True(t, ok)
    assert.Equal(zero, noContainer.firstDetected)
    assert.Equal(zero, noContainer.lastUsed)
    withContainerUsingNoNameImage, ok := manager.getImageRecord(imageID(1))
    require.True(t, ok)
    assert.Equal(zero, withContainerUsingNoNameImage.firstDetected)
    assert.True(withContainerUsingNoNameImage.lastUsed.After(startTime))
    withContainer, ok := manager.getImageRecord(imageID(2))
    require.True(t, ok)
    assert.Equal(zero, withContainer.firstDetected)
    assert.True(withContainer.lastUsed.After(startTime))
}

func TestDetectImagesWithNewImage(t *testing.T) {
    // Just one image initially.
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }

    err := manager.detectImages(zero)
    assert := assert.New(t)
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 2)

    // Add a new image.
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 1024),
        makeImage(2, 1024),
    }

    detectedTime := zero.Add(time.Second)
    startTime := time.Now().Add(-time.Millisecond)
    err = manager.detectImages(detectedTime)
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 3)
    noContainer, ok := manager.getImageRecord(imageID(0))
    require.True(t, ok)
    assert.Equal(zero, noContainer.firstDetected)
    assert.Equal(zero, noContainer.lastUsed)
    withContainer, ok := manager.getImageRecord(imageID(1))
    require.True(t, ok)
    assert.Equal(zero, withContainer.firstDetected)
    assert.True(withContainer.lastUsed.After(startTime))
    newContainer, ok := manager.getImageRecord(imageID(2))
    require.True(t, ok)
    assert.Equal(detectedTime, newContainer.firstDetected)
    assert.Equal(zero, noContainer.lastUsed)
}

func TestDetectImagesContainerStopped(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }

    err := manager.detectImages(zero)
    assert := assert.New(t)
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 2)
    withContainer, ok := manager.getImageRecord(imageID(1))
    require.True(t, ok)

    // Simulate container being stopped.
    fakeRuntime.AllPodList = []*containertest.FakePod{}
    err = manager.detectImages(time.Now())
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 2)
    container1, ok := manager.getImageRecord(imageID(0))
    require.True(t, ok)
    assert.Equal(zero, container1.firstDetected)
    assert.Equal(zero, container1.lastUsed)
    container2, ok := manager.getImageRecord(imageID(1))
    require.True(t, ok)
    assert.Equal(zero, container2.firstDetected)
    assert.True(container2.lastUsed.Equal(withContainer.lastUsed))
}

func TestDetectImagesWithRemovedImages(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }

    err := manager.detectImages(zero)
    assert := assert.New(t)
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 2)

    // Simulate both images being removed.
    fakeRuntime.ImageList = []container.Image{}
    err = manager.detectImages(time.Now())
    require.NoError(t, err)
    assert.Equal(manager.imageRecordsLen(), 0)
}

func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }

    spaceFreed, err := manager.freeSpace(2048, time.Now())
    assert := assert.New(t)
    require.NoError(t, err)
    assert.EqualValues(1024, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 1)
}

func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
        makeImage(2, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(2),
            },
        }},
    }

    spaceFreed, err := manager.DeleteUnusedImages()
    assert := assert.New(t)
    require.NoError(t, err)
    assert.EqualValues(3072, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 1)
}

func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(0),
                makeContainer(1),
            },
        }},
    }

    // Make 1 be more recently used than 0.
    require.NoError(t, manager.detectImages(zero))
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }
    require.NoError(t, manager.detectImages(time.Now()))
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{},
        }},
    }
    require.NoError(t, manager.detectImages(time.Now()))
    require.Equal(t, manager.imageRecordsLen(), 2)

    spaceFreed, err := manager.freeSpace(1024, time.Now())
    assert := assert.New(t)
    require.NoError(t, err)
    assert.EqualValues(1024, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 1)
}

func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
    manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
    }
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(0),
            },
        }},
    }

    // Make 1 more recently detected but used at the same time as 0.
    require.NoError(t, manager.detectImages(zero))
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    require.NoError(t, manager.detectImages(time.Now()))
    fakeRuntime.AllPodList = []*containertest.FakePod{}
    require.NoError(t, manager.detectImages(time.Now()))
    require.Equal(t, manager.imageRecordsLen(), 2)

    spaceFreed, err := manager.freeSpace(1024, time.Now())
    assert := assert.New(t)
    require.NoError(t, err)
    assert.EqualValues(2048, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 1)
}

func TestGarbageCollectBelowLowThreshold(t *testing.T) {
    policy := ImageGCPolicy{
        HighThresholdPercent: 90,
        LowThresholdPercent:  80,
    }
    manager, _, mockCadvisor := newRealImageGCManager(policy)

    // Expect 40% usage.
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
        Available: 600,
        Capacity:  1000,
    }, nil)

    assert.NoError(t, manager.GarbageCollect())
}

func TestGarbageCollectCadvisorFailure(t *testing.T) {
    policy := ImageGCPolicy{
        HighThresholdPercent: 90,
        LowThresholdPercent:  80,
    }
    manager, _, mockCadvisor := newRealImageGCManager(policy)

    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error"))
    assert.NotNil(t, manager.GarbageCollect())
}

func TestGarbageCollectBelowSuccess(t *testing.T) {
    policy := ImageGCPolicy{
        HighThresholdPercent: 90,
        LowThresholdPercent:  80,
    }
    manager, fakeRuntime, mockCadvisor := newRealImageGCManager(policy)

    // Expect 95% usage and most of it gets freed.
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
        Available: 50,
        Capacity:  1000,
    }, nil)
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 450),
    }

    assert.NoError(t, manager.GarbageCollect())
}

func TestGarbageCollectNotEnoughFreed(t *testing.T) {
    policy := ImageGCPolicy{
        HighThresholdPercent: 90,
        LowThresholdPercent:  80,
    }
    manager, fakeRuntime, mockCadvisor := newRealImageGCManager(policy)

    // Expect 95% usage and little of it gets freed.
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
        Available: 50,
        Capacity:  1000,
    }, nil)
    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 50),
    }

    assert.NotNil(t, manager.GarbageCollect())
}

func TestGarbageCollectImageNotOldEnough(t *testing.T) {
    policy := ImageGCPolicy{
        HighThresholdPercent: 90,
        LowThresholdPercent:  80,
        MinAge:               time.Minute * 1,
    }
    fakeRuntime := &containertest.FakeRuntime{}
    mockCadvisor := new(cadvisortest.Mock)
    manager := &realImageGCManager{
        runtime:      fakeRuntime,
        policy:       policy,
        imageRecords: make(map[string]*imageRecord),
        cadvisor:     mockCadvisor,
        recorder:     &record.FakeRecorder{},
    }

    fakeRuntime.ImageList = []container.Image{
        makeImage(0, 1024),
        makeImage(1, 2048),
    }
    // 1 image is in use, and another one is not old enough
    fakeRuntime.AllPodList = []*containertest.FakePod{
        {Pod: &container.Pod{
            Containers: []*container.Container{
                makeContainer(1),
            },
        }},
    }

    fakeClock := clock.NewFakeClock(time.Now())
    t.Log(fakeClock.Now())
    require.NoError(t, manager.detectImages(fakeClock.Now()))
    require.Equal(t, manager.imageRecordsLen(), 2)
    // no space freed since one image is in used, and another one is not old enough
    spaceFreed, err := manager.freeSpace(1024, fakeClock.Now())
    assert := assert.New(t)
    require.NoError(t, err)
    assert.EqualValues(0, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 2)

    // move clock by minAge duration, then 1 image will be garbage collected
    fakeClock.Step(policy.MinAge)
    spaceFreed, err = manager.freeSpace(1024, fakeClock.Now())
    require.NoError(t, err)
    assert.EqualValues(1024, spaceFreed)
    assert.Len(fakeRuntime.ImageList, 1)
}
161 vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go generated vendored Normal file
@@ -0,0 +1,161 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "fmt"

    dockerref "github.com/docker/distribution/reference"
    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/record"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/events"
    "k8s.io/kubernetes/pkg/util/flowcontrol"
    "k8s.io/kubernetes/pkg/util/parsers"
)

// imageManager provides the functionalities for image pulling.
type imageManager struct {
    recorder     record.EventRecorder
    imageService kubecontainer.ImageService
    backOff      *flowcontrol.Backoff
    // It will check the presence of the image, and report the 'image pulling', image pulled' events correspondingly.
    puller imagePuller
}

var _ ImageManager = &imageManager{}

func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager {
    imageService = throttleImagePulling(imageService, qps, burst)

    var puller imagePuller
    if serialized {
        puller = newSerialImagePuller(imageService)
    } else {
        puller = newParallelImagePuller(imageService)
    }
    return &imageManager{
        recorder:     recorder,
        imageService: imageService,
        backOff:      imageBackOff,
        puller:       puller,
    }
}

// shouldPullImage returns whether we should pull an image according to
// the presence and pull policy of the image.
func shouldPullImage(container *v1.Container, imagePresent bool) bool {
    if container.ImagePullPolicy == v1.PullNever {
        return false
    }

    if container.ImagePullPolicy == v1.PullAlways ||
        (container.ImagePullPolicy == v1.PullIfNotPresent && (!imagePresent)) {
        return true
    }

    return false
}

// records an event using ref, event msg. log to glog using prefix, msg, logFn
func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
    if ref != nil {
        m.recorder.Event(ref, eventtype, event, msg)
    } else {
        logFn(fmt.Sprint(prefix, " ", msg))
    }
}

// EnsureImageExists pulls the image for the specified pod and container.
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) {
    logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image)
    ref, err := kubecontainer.GenerateContainerRef(pod, container)
    if err != nil {
        glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
    }

    // If the image contains no tag or digest, a default tag should be applied.
    image, err := applyDefaultImageTag(container.Image)
    if err != nil {
        msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err)
        m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
        return ErrInvalidImageName, msg
    }

    spec := kubecontainer.ImageSpec{Image: image}
    present, err := m.imageService.IsImagePresent(spec)
    if err != nil {
        msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
        m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
        return ErrImageInspect, msg
    }

    if !shouldPullImage(container, present) {
        if present {
            msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
            m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
            return nil, ""
        } else {
            msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
            m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
            return ErrImageNeverPull, msg
        }
    }

    backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
    if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
        msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
        m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
        return ErrImagePullBackOff, msg
    }
    m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
    errChan := make(chan error)
    m.puller.pullImage(spec, pullSecrets, errChan)
    if err := <-errChan; err != nil {
        m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
        m.backOff.Next(backOffKey, m.backOff.Clock.Now())
        if err == RegistryUnavailable {
            msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)
            return err, msg
        } else {
            return ErrImagePull, err.Error()
        }
    }
    m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
    m.backOff.GC()
    return nil, ""
}

// applyDefaultImageTag parses a docker image string, if it doesn't contain any tag or digest,
// a default tag will be applied.
func applyDefaultImageTag(image string) (string, error) {
    named, err := dockerref.ParseNamed(image)
    if err != nil {
        return "", fmt.Errorf("couldn't parse image reference %q: %v", image, err)
    }
    _, isTagged := named.(dockerref.Tagged)
    _, isDigested := named.(dockerref.Digested)
    if !isTagged && !isDigested {
        named, err := dockerref.WithTag(named, parsers.DefaultImageTag)
        if err != nil {
            return "", fmt.Errorf("failed to apply default image tag %q: %v", image, err)
        }
        image = named.String()
    }
    return image, nil
}
176 vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager_test.go generated vendored Normal file
@@ -0,0 +1,176 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "errors"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/record"
    . "k8s.io/kubernetes/pkg/kubelet/container"
    ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
    "k8s.io/kubernetes/pkg/util/clock"
    "k8s.io/kubernetes/pkg/util/flowcontrol"
)

type pullerTestCase struct {
    containerImage  string
    policy          v1.PullPolicy
    calledFunctions []string
    inspectErr      error
    pullerErr       error
    expectedErr     []error
}

func pullerTestCases() []pullerTestCase {
    return []pullerTestCase{
        { // pull missing image
            containerImage:  "missing_image",
            policy:          v1.PullIfNotPresent,
            calledFunctions: []string{"IsImagePresent", "PullImage"},
            inspectErr:      nil,
            pullerErr:       nil,
            expectedErr:     []error{nil}},

        { // image present, don't pull
            containerImage:  "present_image",
            policy:          v1.PullIfNotPresent,
            calledFunctions: []string{"IsImagePresent"},
            inspectErr:      nil,
            pullerErr:       nil,
            expectedErr:     []error{nil, nil, nil}},
        // image present, pull it
        {containerImage: "present_image",
            policy:          v1.PullAlways,
            calledFunctions: []string{"IsImagePresent", "PullImage"},
            inspectErr:      nil,
            pullerErr:       nil,
            expectedErr:     []error{nil, nil, nil}},
        // missing image, error PullNever
        {containerImage: "missing_image",
            policy:          v1.PullNever,
            calledFunctions: []string{"IsImagePresent"},
            inspectErr:      nil,
            pullerErr:       nil,
            expectedErr:     []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
        // missing image, unable to inspect
        {containerImage: "missing_image",
            policy:          v1.PullIfNotPresent,
            calledFunctions: []string{"IsImagePresent"},
            inspectErr:      errors.New("unknown inspectError"),
            pullerErr:       nil,
            expectedErr:     []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
        // missing image, unable to fetch
        {containerImage: "typo_image",
            policy:          v1.PullIfNotPresent,
            calledFunctions: []string{"IsImagePresent", "PullImage"},
            inspectErr:      nil,
            pullerErr:       errors.New("404"),
            expectedErr:     []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}},
    }
}

func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
    container = &v1.Container{
        Name:            "container_name",
        Image:           c.containerImage,
        ImagePullPolicy: c.policy,
    }

    backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
    fakeClock = clock.NewFakeClock(time.Now())
    backOff.Clock = fakeClock

    fakeRuntime = &ctest.FakeRuntime{}
    fakeRecorder := &record.FakeRecorder{}

    fakeRuntime.ImageList = []Image{{ID: "present_image"}}
    fakeRuntime.Err = c.pullerErr
    fakeRuntime.InspectErr = c.inspectErr

    puller = NewImageManager(fakeRecorder, fakeRuntime, backOff, serialized, 0, 0)
    return
}

func TestParallelPuller(t *testing.T) {
    pod := &v1.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name:            "test_pod",
            Namespace:       "test-ns",
            UID:             "bar",
            ResourceVersion: "42",
            SelfLink:        "/api/v1/pods/foo",
        }}

    cases := pullerTestCases()

    for i, c := range cases {
        puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, false)

        for tick, expected := range c.expectedErr {
            fakeClock.Step(time.Second)
            err, _ := puller.EnsureImageExists(pod, container, nil)
            fakeRuntime.AssertCalls(c.calledFunctions)
            assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
        }
    }
}

func TestSerializedPuller(t *testing.T) {
    pod := &v1.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name:            "test_pod",
            Namespace:       "test-ns",
            UID:             "bar",
            ResourceVersion: "42",
            SelfLink:        "/api/v1/pods/foo",
        }}

    cases := pullerTestCases()

    for i, c := range cases {
        puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, true)

        for tick, expected := range c.expectedErr {
            fakeClock.Step(time.Second)
            err, _ := puller.EnsureImageExists(pod, container, nil)
            fakeRuntime.AssertCalls(c.calledFunctions)
            assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
        }
    }
}

func TestApplyDefaultImageTag(t *testing.T) {
    for _, testCase := range []struct {
        Input  string
        Output string
    }{
        {Input: "root", Output: "root:latest"},
        {Input: "root:tag", Output: "root:tag"},
        {Input: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Output: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
    } {
        image, err := applyDefaultImageTag(testCase.Input)
        if err != nil {
            t.Errorf("applyDefaultImageTag(%s) failed: %v", testCase.Input, err)
        } else if image != testCase.Output {
            t.Errorf("Expected image reference: %q, got %q", testCase.Output, image)
        }
    }
}
79 vendor/k8s.io/kubernetes/pkg/kubelet/images/puller.go generated vendored Normal file
@@ -0,0 +1,79 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "time"

    "k8s.io/kubernetes/pkg/api/v1"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/util/wait"
)

type imagePuller interface {
    pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- error)
}

var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}

type parallelImagePuller struct {
    imageService kubecontainer.ImageService
}

func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller {
    return &parallelImagePuller{imageService}
}

func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) {
    go func() {
        errChan <- pip.imageService.PullImage(spec, pullSecrets)
    }()
}

// Maximum number of image pull requests than can be queued.
const maxImagePullRequests = 10

type serialImagePuller struct {
    imageService kubecontainer.ImageService
    pullRequests chan *imagePullRequest
}

func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
    imagePuller := &serialImagePuller{imageService, make(chan *imagePullRequest, maxImagePullRequests)}
    go wait.Until(imagePuller.processImagePullRequests, time.Second, wait.NeverStop)
    return imagePuller
}

type imagePullRequest struct {
    spec        kubecontainer.ImageSpec
    pullSecrets []v1.Secret
    errChan     chan<- error
}

func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) {
    sip.pullRequests <- &imagePullRequest{
        spec:        spec,
        pullSecrets: pullSecrets,
        errChan:     errChan,
    }
}

func (sip *serialImagePuller) processImagePullRequests() {
    for pullRequest := range sip.pullRequests {
        pullRequest.errChan <- sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets)
    }
}
55 vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go generated vendored Normal file
@@ -0,0 +1,55 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
    "errors"

    "k8s.io/kubernetes/pkg/api/v1"
)

var (
    // Container image pull failed, kubelet is backing off image pull
    ErrImagePullBackOff = errors.New("ImagePullBackOff")

    // Unable to inspect image
    ErrImageInspect = errors.New("ImageInspectError")

    // General image pull error
    ErrImagePull = errors.New("ErrImagePull")

    // Required Image is absent on host and PullPolicy is NeverPullImage
    ErrImageNeverPull = errors.New("ErrImageNeverPull")

    // Get http error when pulling image from registry
    RegistryUnavailable = errors.New("RegistryUnavailable")

    // Unable to parse the image name.
    ErrInvalidImageName = errors.New("InvalidImageName")
)

// ImageManager provides an interface to manage the lifecycle of images.
// Implementations of this interface are expected to deal with pulling (downloading),
// managing, and deleting container images.
// Implementations are expected to abstract the underlying runtimes.
// Implementations are expected to be thread safe.
type ImageManager interface {
    // EnsureImageExists ensures that image specified in `container` exists.
    EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string)

    // TODO(ronl): consolidating image managing and deleting operation in this interface
}