
Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

View file

@@ -0,0 +1,77 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"deployment_controller.go",
"progress.go",
"recreate.go",
"rollback.go",
"rolling.go",
"sync.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/retry:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/pod:go_default_library",
"//pkg/util/replicaset:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"deployment_controller_test.go",
"rolling_test.go",
"sync_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
],
)

View file

@@ -0,0 +1,455 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package deployment contains all the logic for handling Kubernetes Deployments.
// It implements a set of strategies (rolling, recreate) for deploying an application,
// the means to roll back to previous versions, proportional scaling for mitigating
// risk, cleanup policy, and other useful features of Deployments.
package deployment
import (
"fmt"
"reflect"
"sort"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
)
const (
// FullDeploymentResyncPeriod means we'll attempt to recompute the required replicas
// of all deployments.
// This recomputation happens based on contents in the local caches.
FullDeploymentResyncPeriod = 30 * time.Second
// We must avoid creating a new replica set / counting pods until the replica set / pods store has synced.
// If it hasn't synced, to avoid a hot loop, we'll wait this long between checks.
StoreSyncedPollPeriod = 100 * time.Millisecond
// MaxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
MaxRetries = 5
)
// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
type DeploymentController struct {
client clientset.Interface
eventRecorder record.EventRecorder
// To allow injection of syncDeployment for testing.
syncHandler func(dKey string) error
// A store of deployments, populated by the dController
dLister *cache.StoreToDeploymentLister
// A store of ReplicaSets, populated by the rsController
rsLister *cache.StoreToReplicaSetLister
// A store of pods, populated by the podController
podLister *cache.StoreToPodLister
// dListerSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
dListerSynced cache.InformerSynced
// rsListerSynced returns true if the ReplicaSet store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
rsListerSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podListerSynced cache.InformerSynced
// Deployments that need to be synced
queue workqueue.RateLimitingInterface
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})
if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
}
dc := &DeploymentController{
client: client,
eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}
dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeploymentNotification,
UpdateFunc: dc.updateDeploymentNotification,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
DeleteFunc: dc.deleteDeploymentNotification,
})
rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet,
})
dc.syncHandler = dc.syncDeployment
dc.dLister = dInformer.Lister()
dc.rsLister = rsInformer.Lister()
dc.podLister = podInformer.Lister()
dc.dListerSynced = dInformer.Informer().HasSynced
dc.rsListerSynced = rsInformer.Informer().HasSynced
dc.podListerSynced = podInformer.Informer().HasSynced
return dc
}
// Run begins watching and syncing.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
glog.Infof("Starting deployment controller")
if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(dc.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down deployment controller")
}
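// runDeploymentController is an illustrative wiring sketch, not part of the original source:
// it shows how a caller could construct and start the controller with the shared informer
// factory used by the tests in this package. The function name and the resyncPeriod, workers,
// and stopCh parameters are assumptions for the example.
func runDeploymentController(client clientset.Interface, resyncPeriod time.Duration, workers int, stopCh <-chan struct{}) {
// Build Deployment, ReplicaSet and Pod informers from a single shared factory.
sharedInformers := informers.NewSharedInformerFactory(client, nil, resyncPeriod)
dc := NewDeploymentController(sharedInformers.Deployments(), sharedInformers.ReplicaSets(), sharedInformers.Pods(), client)
// Start the informers before running workers so the caches can sync.
sharedInformers.Start(stopCh)
// Run blocks until stopCh is closed.
dc.Run(workers, stopCh)
}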
func (dc *DeploymentController) addDeploymentNotification(obj interface{}) {
d := obj.(*extensions.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}
func (dc *DeploymentController) updateDeploymentNotification(old, cur interface{}) {
oldD := old.(*extensions.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
// Resync on deployment object relist.
dc.enqueueDeployment(cur.(*extensions.Deployment))
}
func (dc *DeploymentController) deleteDeploymentNotification(obj interface{}) {
d, ok := obj.(*extensions.Deployment)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
d, ok = tombstone.Obj.(*extensions.Deployment)
if !ok {
glog.Errorf("Tombstone contained object that is not a Deployment %#v", obj)
return
}
}
glog.V(4).Infof("Deleting deployment %s", d.Name)
dc.enqueueDeployment(d)
}
// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(obj interface{}) {
rs := obj.(*extensions.ReplicaSet)
glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
if d := dc.getDeploymentForReplicaSet(rs); d != nil {
dc.enqueueDeployment(d)
}
}
// getDeploymentForReplicaSet returns the deployment managing the given ReplicaSet.
func (dc *DeploymentController) getDeploymentForReplicaSet(rs *extensions.ReplicaSet) *extensions.Deployment {
deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs)
if err != nil || len(deployments) == 0 {
glog.V(4).Infof("Error: %v. No deployment found for ReplicaSet %v, deployment controller will avoid syncing.", err, rs.Name)
return nil
}
// Because all ReplicaSets belonging to a deployment should have a unique label key,
// there should never be more than one deployment returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers; for now we just return the older one.
if len(deployments) > 1 {
sort.Sort(util.BySelectorLastUpdateTime(deployments))
glog.Errorf("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s", rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
}
return deployments[0]
}
// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet
// is updated and wakes them up. If anything on the ReplicaSet has changed, we need to
// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet
// types.
func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
curRS := cur.(*extensions.ReplicaSet)
oldRS := old.(*extensions.ReplicaSet)
if curRS.ResourceVersion == oldRS.ResourceVersion {
// Periodic resync will send update events for all known replica sets.
// Two different versions of the same replica set will always have different RVs.
return
}
// TODO: Write a unittest for this case
glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
if d := dc.getDeploymentForReplicaSet(curRS); d != nil {
dc.enqueueDeployment(d)
}
// A number of things could affect the old deployment: labels changing,
// pod template changing, etc.
if !api.Semantic.DeepEqual(oldRS, curRS) {
if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
dc.enqueueDeployment(oldD)
}
}
}
// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or
// a DeletedFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
rs, ok := obj.(*extensions.ReplicaSet)
// When a delete is dropped, the relist will notice a ReplicaSet in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the ReplicaSet
// changed labels, the new deployment will not be woken up until the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
return
}
rs, ok = tombstone.Obj.(*extensions.ReplicaSet)
if !ok {
glog.Errorf("Tombstone contained object that is not a ReplicaSet %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
return
}
}
glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
if d := dc.getDeploymentForReplicaSet(rs); d != nil {
dc.enqueueDeployment(d)
}
}
func (dc *DeploymentController) enqueueDeployment(deployment *extensions.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", deployment, err)
return
}
dc.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (dc *DeploymentController) worker() {
work := func() bool {
key, quit := dc.queue.Get()
if quit {
return true
}
defer dc.queue.Done(key)
err := dc.syncHandler(key.(string))
dc.handleErr(err, key)
return false
}
for {
if quit := work(); quit {
return
}
}
}
func (dc *DeploymentController) handleErr(err error, key interface{}) {
if err == nil {
dc.queue.Forget(key)
return
}
if dc.queue.NumRequeues(key) < MaxRetries {
glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
dc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
dc.queue.Forget(key)
}
// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
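// The sync proceeds in a fixed order: warn and only bump observedGeneration for deployments
// selecting all pods, do a status-only sync for deployments being deleted, skip newer
// overlapping deployments, record paused/resumed conditions, check for progress failure,
// then dispatch to sync (paused deployments and pure scaling events), rollback, or the
// configured rollout strategy (Recreate or RollingUpdate).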
func (dc *DeploymentController) syncDeployment(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dc.dLister.Indexer.GetByKey(key)
if err != nil {
glog.Errorf("Unable to retrieve deployment %v from store: %v", key, err)
return err
}
if !exists {
glog.Infof("Deployment has been deleted %v", key)
return nil
}
deployment := obj.(*extensions.Deployment)
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
d, err := util.DeploymentDeepCopy(deployment)
if err != nil {
return err
}
everything := metav1.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
}
return nil
}
if d.DeletionTimestamp != nil {
return dc.syncStatusOnly(d)
}
// Handle overlapping deployments by deterministically avoiding syncing deployments that fight over ReplicaSets.
if err = dc.handleOverlap(d); err != nil {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectorOverlap", err.Error())
return nil
}
// Update deployment conditions with an Unknown condition when pausing/resuming
// a deployment. In this way, we can be sure that we won't timeout when a user
// resumes a Deployment with a set progressDeadlineSeconds.
if err = dc.checkPausedConditions(d); err != nil {
return err
}
_, err = dc.hasFailed(d)
if err != nil {
return err
}
// TODO: Automatically rollback here if we failed above. Locate the last complete
// revision and populate the rollback spec with it.
// See https://github.com/kubernetes/kubernetes/issues/23211.
if d.Spec.Paused {
return dc.sync(d)
}
if d.Spec.RollbackTo != nil {
revision := d.Spec.RollbackTo.Revision
if d, err = dc.rollback(d, &revision); err != nil {
return err
}
}
scalingEvent, err := dc.isScalingEvent(d)
if err != nil {
return err
}
if scalingEvent {
return dc.sync(d)
}
switch d.Spec.Strategy.Type {
case extensions.RecreateDeploymentStrategyType:
return dc.rolloutRecreate(d)
case extensions.RollingUpdateDeploymentStrategyType:
return dc.rolloutRolling(d)
}
return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
}
// handleOverlap relists all deployments in the same namespace for overlaps, and avoids syncing
// the newer overlapping ones (only the oldest one is synced). New/old is determined by when the
// deployment's selector was last updated.
func (dc *DeploymentController) handleOverlap(d *extensions.Deployment) error {
deployments, err := dc.dLister.Deployments(d.Namespace).List(labels.Everything())
if err != nil {
return fmt.Errorf("error listing deployments in namespace %s: %v", d.Namespace, err)
}
overlapping := false
for _, other := range deployments {
foundOverlaps, err := util.OverlapsWith(d, other)
if err != nil {
return err
}
if foundOverlaps {
deploymentCopy, err := util.DeploymentDeepCopy(other)
if err != nil {
return err
}
overlapping = true
// Skip syncing this one if an older overlapping one is found.
if util.SelectorUpdatedBefore(deploymentCopy, d) {
// We don't care if the overlapping annotation update failed or not (we don't make decisions based on it)
dc.markDeploymentOverlap(d, deploymentCopy.Name)
dc.clearDeploymentOverlap(deploymentCopy)
return fmt.Errorf("found deployment %s/%s has overlapping selector with an older deployment %s/%s, skip syncing it", d.Namespace, d.Name, deploymentCopy.Namespace, deploymentCopy.Name)
}
dc.markDeploymentOverlap(deploymentCopy, d.Name)
d, _ = dc.clearDeploymentOverlap(d)
}
}
if !overlapping {
// We don't care if the overlapping annotation update failed or not (we don't make decisions based on it)
d, _ = dc.clearDeploymentOverlap(d)
}
return nil
}
func (dc *DeploymentController) markDeploymentOverlap(deployment *extensions.Deployment, withDeployment string) (*extensions.Deployment, error) {
if deployment.Annotations[util.OverlapAnnotation] == withDeployment && deployment.Status.ObservedGeneration >= deployment.Generation {
return deployment, nil
}
if deployment.Annotations == nil {
deployment.Annotations = make(map[string]string)
}
// Update observedGeneration for overlapping deployments so that their deletion won't be blocked.
deployment.Status.ObservedGeneration = deployment.Generation
deployment.Annotations[util.OverlapAnnotation] = withDeployment
return dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
}
func (dc *DeploymentController) clearDeploymentOverlap(deployment *extensions.Deployment) (*extensions.Deployment, error) {
if len(deployment.Annotations[util.OverlapAnnotation]) == 0 {
return deployment, nil
}
delete(deployment.Annotations, util.OverlapAnnotation)
return dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
}

View file

@@ -0,0 +1,286 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
)
var (
alwaysReady = func() bool { return true }
noTimestamp = metav1.Time{}
)
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: name,
CreationTimestamp: timestamp,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{},
},
}
}
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet {
rs := rs(name, specReplicas, selector, noTimestamp)
rs.Status = extensions.ReplicaSetStatus{
Replicas: int32(statusReplicas),
}
return rs
}
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
d := extensions.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
},
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: selector,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
},
RevisionHistoryLimit: revisionHistoryLimit,
},
}
if maxSurge != nil {
d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
}
if maxUnavailable != nil {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
}
return &d
}
func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template,
},
}
}
func getKey(d *extensions.Deployment, t *testing.T) string {
if key, err := controller.KeyFunc(d); err != nil {
t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
return ""
} else {
return key
}
}
type fixture struct {
t *testing.T
client *fake.Clientset
// Objects to put in the store.
dLister []*extensions.Deployment
rsLister []*extensions.ReplicaSet
podLister []*v1.Pod
// Actions expected to happen on the client. Objects from here are also
// preloaded into NewSimpleFake.
actions []core.Action
objects []runtime.Object
}
func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) {
f.actions = append(f.actions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d))
}
func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
action.Subresource = "status"
f.actions = append(f.actions, action)
}
func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs))
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
return f
}
func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
c.dLister.Indexer.Add(d)
}
for _, rs := range f.rsLister {
c.rsLister.Indexer.Add(rs)
}
for _, pod := range f.podLister {
c.podLister.Indexer.Add(pod)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
err := c.syncDeployment(deploymentName)
if err != nil {
f.t.Errorf("error syncing deployment: %v", err)
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
continue
}
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
f.dLister = append(f.dLister, d)
f.objects = append(f.objects, d)
rs := newReplicaSet(d, "deploymentrs-4186632231", 1)
f.expectCreateRSAction(rs)
f.expectUpdateDeploymentAction(d)
f.expectUpdateDeploymentStatusAction(d)
f.run(getKey(d, t))
}
func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
now := metav1.Now()
d.DeletionTimestamp = &now
f.dLister = append(f.dLister, d)
f.run(getKey(d, t))
}
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
empty := metav1.LabelSelector{}
d.Spec.Selector = &empty
controller.dLister.Indexer.Add(d)
// We expect the deployment controller to not take action here since its configuration
// is invalid, even though no replica sets exist that match its selector.
controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))
filteredActions := filterInformerActions(fake.Actions())
if len(filteredActions) == 0 {
return
}
for _, action := range filteredActions {
t.Logf("unexpected action: %#v", action)
}
t.Errorf("expected deployment controller to not take action")
}
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "pods") ||
action.Matches("list", "deployments") ||
action.Matches("list", "replicasets") ||
action.Matches("watch", "pods") ||
action.Matches("watch", "deployments") ||
action.Matches("watch", "replicasets")) {
continue
}
ret = append(ret, action)
}
return ret
}

View file

@@ -0,0 +1,189 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// hasFailed determines if a deployment has failed or not by estimating its progress.
// Progress for a deployment is considered when a new replica set is created or adopted,
// and when new pods scale up or old pods scale down. Progress is not estimated for paused
// deployments or when users don't really care about it, i.e. progressDeadlineSeconds is not
// specified.
func (dc *DeploymentController) hasFailed(d *extensions.Deployment) (bool, error) {
if d.Spec.ProgressDeadlineSeconds == nil || d.Spec.RollbackTo != nil || d.Spec.Paused {
return false, nil
}
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false)
if err != nil {
return false, err
}
// There is a template change so we don't need to check for any progress right now.
if newRS == nil {
return false, nil
}
// Look at the status of the deployment - if there is already a NewRSAvailableReason
// then we don't need to estimate any progress. This is needed in order to avoid
// estimating progress for scaling events after a rollout has finished.
cond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
if cond != nil && cond.Reason == util.NewRSAvailableReason {
return false, nil
}
// TODO: Look for permanent failures here.
// See https://github.com/kubernetes/kubernetes/issues/18568
allRSs := append(oldRSs, newRS)
newStatus := dc.calculateStatus(allRSs, newRS, d)
// If the deployment is complete or it is progressing, there is no need to check if it
// has timed out.
if util.DeploymentComplete(d, &newStatus) || util.DeploymentProgressing(d, &newStatus) {
return false, nil
}
// Check if the deployment has timed out.
return util.DeploymentTimedOut(d, &newStatus), nil
}
// syncRolloutStatus updates the status of a deployment during a rollout. There are
// cases where this helper runs that cannot be prevented by the scaling detection,
// for example a resync of the deployment after it was scaled up. In those cases,
// we shouldn't try to estimate any progress.
func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
newStatus := dc.calculateStatus(allRSs, newRS, d)
// If there is no progressDeadlineSeconds set, remove any Progressing condition.
if d.Spec.ProgressDeadlineSeconds == nil {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
}
// If there is only one active replica set then that means we are not running
// a new rollout; this is a resync where we don't need to estimate any progress
// for this deployment.
currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
isResyncEvent := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
// Check for progress only if there is a progress deadline set and the latest rollout
// hasn't completed yet. We also need to ensure the new replica set exists, otherwise
// we cannot estimate any progress.
if d.Spec.ProgressDeadlineSeconds != nil && !isResyncEvent && newRS != nil {
switch {
case util.DeploymentComplete(d, &newStatus):
// Update the deployment conditions with a message for the new replica set that
// was successfully deployed. If the condition already exists, we ignore this update.
msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentProgressing(d, &newStatus):
// If there is any progress made, continue by not checking if the deployment failed. This
// behavior emulates the rolling updater progressDeadline check.
msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that but
// it also does not update conditions when the reason of the new condition is the
// same as the old. The Progressing condition is a special case because we want to
// update with the same reason and change just lastUpdateTime iff we notice any
// progress. That's why we handle it here.
if currentCond != nil {
if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
}
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentTimedOut(d, &newStatus):
// Update the deployment with a timeout condition. If the condition already exists,
// we ignore this update.
msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
}
}
// Move failure conditions of all replica sets into deployment conditions. For now,
// only one failure condition is returned from getReplicaFailures.
if replicaFailureCond := dc.getReplicaFailures(allRSs, newRS); len(replicaFailureCond) > 0 {
// There will be only one ReplicaFailure condition on the replica set.
util.SetDeploymentCondition(&newStatus, replicaFailureCond[0])
} else {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure)
}
// Do not update if there is nothing new to add.
if reflect.DeepEqual(d.Status, newStatus) {
// TODO: If there is no sign of progress at this point then there is a high chance that the
// deployment is stuck. We should resync this deployment at some point[1] in the future[2] and
// check if it has timed out. We definitely need this, otherwise we depend on the controller
// resync interval. See https://github.com/kubernetes/kubernetes/issues/34458.
//
// [1] time.Now() + progressDeadlineSeconds - lastUpdateTime (of the Progressing condition).
// [2] Use dc.queue.AddAfter
return nil
}
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
// getReplicaFailures will convert replica failure conditions from replica sets
// to deployment conditions.
func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition {
var conditions []extensions.DeploymentCondition
if newRS != nil {
for _, c := range newRS.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
// Return failures for the new replica set over failures from old replica sets.
if len(conditions) > 0 {
return conditions
}
for i := range allRSs {
rs := allRSs[i]
if rs == nil {
continue
}
for _, c := range rs.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
return conditions
}

View file

@@ -0,0 +1,138 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/wait"
)
// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(deployment *extensions.Deployment) error {
// Don't create a new RS if one doesn't already exist, so that we avoid scaling up before scaling down
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
activeOldRSs := controller.FilterActiveReplicaSets(oldRSs)
// scale down old replica sets
scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(activeOldRSs, deployment)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// Wait for all old replica sets to scale down to zero.
if err := dc.waitForInactiveReplicaSets(activeOldRSs); err != nil {
return err
}
// If we need to create a new RS, create it now
// TODO: Create a new RS without re-listing all RSs.
if newRS == nil {
newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return err
}
allRSs = append(oldRSs, newRS)
}
// scale up new replica set
scaledUp, err := dc.scaleUpNewReplicaSetForRecreate(newRS, deployment)
if err != nil {
return err
}
if scaledUp {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
dc.cleanupDeployment(oldRSs, deployment)
// Sync deployment status
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled := false
for i := range oldRSs {
rs := oldRSs[i]
// Scaling not required.
if *(rs.Spec.Replicas) == 0 {
continue
}
scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
if err != nil {
return false, err
}
if scaledRS {
oldRSs[i] = updatedRS
scaled = true
}
}
return scaled, nil
}
// waitForInactiveReplicaSets will wait until all passed replica sets are inactive and have been noticed
// by the replica set controller.
func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.ReplicaSet) error {
for i := range oldRSs {
rs := oldRSs[i]
desiredGeneration := rs.Generation
observedGeneration := rs.Status.ObservedGeneration
specReplicas := *(rs.Spec.Replicas)
statusReplicas := rs.Status.Replicas
if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
if err != nil {
return false, err
}
specReplicas = *(replicaSet.Spec.Replicas)
statusReplicas = replicaSet.Status.Replicas
observedGeneration = replicaSet.Status.ObservedGeneration
// TODO: We also need to wait for terminating replicas to actually terminate.
// See https://github.com/kubernetes/kubernetes/issues/32567
return observedGeneration >= desiredGeneration && *(replicaSet.Spec.Replicas) == 0 && replicaSet.Status.Replicas == 0, nil
}); err != nil {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
rs.Name, observedGeneration >= desiredGeneration, specReplicas, statusReplicas)
}
return err
}
}
return nil
}
// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}

View file

@@ -0,0 +1,106 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
// Roll back to a revision; no-op if toRevision is the deployment's current revision
func (dc *DeploymentController) rollback(deployment *extensions.Deployment, toRevision *int64) (*extensions.Deployment, error) {
newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return nil, err
}
allRSs := append(allOldRSs, newRS)
// If the rollback revision is 0, roll back to the last revision
if *toRevision == 0 {
if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 {
// If we still can't find the last revision, give up on the rollback
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
// Gives up rollback
return dc.updateDeploymentAndClearRollbackTo(deployment)
}
}
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == *toRevision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// rollback by copying podTemplate.Spec from the replica set
// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
// no-op if the spec matches the current deployment's podTemplate.Spec
deployment, performedRollback, err := dc.rollbackToTemplate(deployment, rs)
if performedRollback && err == nil {
dc.emitRollbackNormalEvent(deployment, fmt.Sprintf("Rolled back deployment %q to revision %d", deployment.Name, *toRevision))
}
return deployment, err
}
}
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to rollback to.")
// Gives up rollback
return dc.updateDeploymentAndClearRollbackTo(deployment)
}
func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deployment, rs *extensions.ReplicaSet) (d *extensions.Deployment, performedRollback bool, err error) {
if !reflect.DeepEqual(deploymentutil.GetNewReplicaSetTemplate(deployment), rs.Spec.Template) {
glog.Infof("Rolling back deployment %s to template spec %+v", deployment.Name, rs.Spec.Template.Spec)
deploymentutil.SetFromReplicaSetTemplate(deployment, rs.Spec.Template)
// set RS (the old RS we're rolling back to) annotations back to the deployment;
// otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
//
// For example,
// A Deployment has old RS1 with annotation {change-cause:create}, and new RS2 {change-cause:edit}.
// Note that both annotations are copied from Deployment, and the Deployment should be annotated {change-cause:edit} as well.
// Now, rollback Deployment to RS1, we should update Deployment's pod-template and also copy annotation from RS1.
// Deployment is now annotated {change-cause:create}, and we have new RS1 {change-cause:create}, old RS2 {change-cause:edit}.
//
// If we don't copy the annotations back from RS to deployment on rollback, the Deployment will stay as {change-cause:edit},
// and new RS1 becomes {change-cause:edit} (copied from deployment after rollback), old RS2 {change-cause:edit}, which is not correct.
deploymentutil.SetDeploymentAnnotationsTo(deployment, rs)
performedRollback = true
} else {
glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %s, skipping rollback...", deployment.Name)
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackTemplateUnchanged, fmt.Sprintf("The rollback revision contains the same template as current deployment %q", deployment.Name))
}
d, err = dc.updateDeploymentAndClearRollbackTo(deployment)
return
}
func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) {
dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, reason, message)
}
func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) {
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}
// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(deployment *extensions.Deployment) (*extensions.Deployment, error) {
glog.V(4).Infof("Cleans up rollbackTo of deployment %s", deployment.Name)
deployment.Spec.RollbackTo = nil
return dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).Update(deployment)
}

View file

@@ -0,0 +1,229 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"sort"
"github.com/golang/glog"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/integer"
)
// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
// Scale up, if we can.
scaledUp, err := dc.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
return err
}
if scaledUp {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// Scale down, if we can.
scaledDown, err := dc.reconcileOldReplicaSets(allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, deployment)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
dc.cleanupDeployment(oldRSs, deployment)
// Sync deployment status
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required.
return false, nil
}
if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
// Scale down.
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}
newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
if err != nil {
return false, err
}
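// Worked example, mirroring the table-driven test for this function: with
// deployment.Spec.Replicas = 10 and maxSurge = 2, the total pod count may reach 12.
// If the old replica set still runs 5 pods and the new one runs 0, NewRSNewReplicas
// returns 12 - 5 = 7, so the new replica set is scaled from 0 to 7; if the old replica
// set still runs 10 pods, only 12 - 10 = 2 fit and the new replica set is scaled to 2.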
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, newReplicasCount, deployment)
return scaled, err
}
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
if oldPodsCount == 0 {
// Can't scale down further
return false, nil
}
allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down. We can scale down in the following 2 cases:
// * Some old replica sets have unhealthy replicas; we can safely scale down those unhealthy replicas since that won't further
// increase unavailability.
// * The new replica set has scaled up and its replicas have become ready; then we can scale down old replica sets in a further step.
//
// maxScaledDown := allPodsCount - minAvailable - newReplicaSetPodsUnavailable
// take into account not only maxUnavailable and any surge pods that have been created, but also unavailable pods from
// the newRS, so that the unavailable pods from the newRS would not make us scale down old replica sets in a further
// step (which would increase unavailability).
//
// Concrete example:
//
// * 10 replicas
// * 2 maxUnavailable (absolute number, not percent)
// * 3 maxSurge (absolute number, not percent)
//
// case 1:
// * Deployment is updated, newRS is created with 3 replicas, oldRS is scaled down to 8, and newRS is scaled up to 5.
// * The new replica set pods crashloop and never become available.
// * allPodsCount is 13. minAvailable is 8. newRSPodsUnavailable is 5.
// * A node fails and causes one of the oldRS pods to become unavailable. However, 13 - 8 - 5 = 0, so the oldRS won't be scaled down.
// * The user notices the crashloop and does kubectl rollout undo to rollback.
// * newRSPodsUnavailable is 1, since we rolled back to the good replica set, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping pods will be scaled down.
// * The total number of pods will then be 9 and the newRS can be scaled up to 10.
//
// case 2:
// Same example, but pushing a new pod template instead of rolling back (aka "roll over"):
// * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
// * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
// allow the new replica set to be scaled up by 5.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
if maxScaledDown <= 0 {
return false, nil
}
// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown)
if err != nil {
return false, err
}
glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
// Scale down old replica sets; we need to check maxUnavailable to ensure we can scale down
allRSs = append(oldRSs, newRS)
scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
return false, err
}
glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
totalScaledDown := cleanupCount + scaledDownCount
return totalScaledDown > 0, nil
}
// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
// be deleted first and won't increase unavailability.
totalScaledDown := int32(0)
for i, targetRS := range oldRSs {
if totalScaledDown >= maxCleanupCount {
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
}
scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return nil, totalScaledDown, err
}
totalScaledDown += scaledDownCount
oldRSs[i] = updatedOldRS
}
return oldRSs, totalScaledDown, nil
}
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// It needs to check maxUnavailable to ensure availability.
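// Worked example, mirroring the reconcileOldReplicaSets test cases: with 10 desired replicas
// and maxUnavailable = 2, minAvailable is 8. If the old replica set has 10 available pods and
// the new one has none, availablePodCount is 10, so at most 10 - 8 = 2 old pods may be removed
// and the old replica set is scaled down to 8.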
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
// Find the number of available pods.
availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
if availablePodCount <= minAvailable {
// Cannot scale down.
return 0, nil
}
glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
totalScaledDown := int32(0)
totalScaleDownCount := availablePodCount - minAvailable
for _, targetRS := range oldRSs {
if totalScaledDown >= totalScaleDownCount {
// No further scaling required.
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this ReplicaSet.
continue
}
// Scale down.
scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return totalScaledDown, err
}
totalScaledDown += scaleDownCount
}
return totalScaledDown, nil
}

View file

@@ -0,0 +1,379 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/util/intstr"
)
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxSurge intstr.IntOrString
oldReplicas int
newReplicas int
scaleExpected bool
expectedNewReplicas int
}{
{
// Should not scale up.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 2,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 5,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 7,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 2,
scaleExpected: false,
},
{
// Should scale down.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 2,
newReplicas: 11,
scaleExpected: true,
expectedNewReplicas: 10,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*extensions.ReplicaSet{newRS, oldRS}
maxUnavailable := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
controller := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled || len(fake.Actions()) > 0 {
t.Errorf("unexpected scaling: %v", fake.Actions())
}
continue
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
if len(fake.Actions()) != 1 {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}
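// TestDeploymentController_reconcileOldReplicaSets verifies that old replica sets are
// only scaled down while the deployment retains at least
// deployment.spec.replicas - maxUnavailable available pods.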
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
oldReplicas int
newReplicas int
readyPodsFromOldRS int
readyPodsFromNewRS int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect unhealthy replicas from old replica sets to be cleaned up
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 9,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // unavailable pods from the newRS consume the maxUnavailable budget, so old RSs are not scaled down any further
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newSelector := map[string]string{"foo": "new"}
oldSelector := map[string]string{"foo": "old"}
newRS := rs("foo-new", test.newReplicas, newSelector, noTimestamp)
newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*extensions.ReplicaSet{oldRS, newRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected && scaled {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
}
}
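// TestDeploymentController_cleanupUnhealthyReplicas verifies that at most maxCleanupCount
// unhealthy (not-yet-available) replicas are removed from the old replica sets, and that
// healthy replicas are left untouched.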
func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
tests := []struct {
oldReplicas int
readyPods int
unHealthyPods int
maxCleanupCount int
cleanupCountExpected int
}{
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 1,
cleanupCountExpected: 1,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 3,
cleanupCountExpected: 2,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 0,
cleanupCountExpected: 0,
},
{
oldReplicas: 10,
readyPods: 10,
unHealthyPods: 0,
maxCleanupCount: 3,
cleanupCountExpected: 0,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*extensions.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, int32(test.maxCleanupCount))
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if int(cleanupCount) != test.cleanupCountExpected {
t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
continue
}
}
}
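// TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate verifies the
// RollingUpdate scale-down path: old replica sets shrink only when the number of
// available pods exceeds deployment.spec.replicas - maxUnavailable.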
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
readyPods int
oldReplicas int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 8,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 8,
oldReplicas: 10,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 1,
oldReplicas: 10,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*extensions.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled != 0 {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
continue
}
if test.scaleExpected && scaled == 0 {
t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
continue
}
// There are both list and update actions logged, so extract the update
// action for verification.
var updateAction core.UpdateAction
for _, action := range fakeClientset.Actions() {
switch a := action.(type) {
case core.UpdateAction:
if updateAction != nil {
t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a)
} else {
updateAction = a
}
}
}
if updateAction == nil {
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,403 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
"time"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
testclient "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/intstr"
)
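// maxSurge is a small helper that wraps an int in an IntOrString pointer for use in
// test deployment specs.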
func maxSurge(val int) *intstr.IntOrString {
surge := intstr.FromInt(val)
return &surge
}
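// TestScale exercises dc.scale, which resizes the new and old replica sets when only
// deployment.spec.replicas has changed: plain scaling events resize the new replica set
// directly, while proportional scaling spreads the delta across all replica sets in
// proportion to their current sizes, giving leftovers to the newer replica sets when
// scaling up and taking them from the older ones when scaling down.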
func TestScale(t *testing.T) {
newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)
var updatedTemplate = func(replicas int) *extensions.Deployment {
d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Template.Labels["another"] = "label"
return d
}
tests := []struct {
name string
deployment *extensions.Deployment
oldDeployment *extensions.Deployment
newRS *extensions.ReplicaSet
oldRSs []*extensions.ReplicaSet
expectedNew *extensions.ReplicaSet
expectedOld []*extensions.ReplicaSet
wasntUpdated map[string]bool
desiredReplicasAnnotations map[string]int32
}{
{
name: "normal scaling event: 10 -> 12",
deployment: newDeployment("foo", 12, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
expectedNew: rs("foo-v1", 12, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
},
{
name: "normal scaling event: 10 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
expectedNew: rs("foo-v1", 5, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
},
{
name: "proportional scaling: 5 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
},
{
name: "proportional scaling: 5 -> 3",
deployment: newDeployment("foo", 3, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
},
{
name: "proportional scaling: 9 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),
newRS: rs("foo-v2", 8, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
},
{
name: "proportional scaling: 7 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 3, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
},
{
name: "proportional scaling: 13 -> 8",
deployment: newDeployment("foo", 8, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales up the new replica set.
{
name: "leftover distribution: 3 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
},
// Scales down the older replica set.
{
name: "leftover distribution: 3 -> 2",
deployment: newDeployment("foo", 2, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up the latest replica set first.
{
name: "proportional scaling (no new rs): 4 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales down to zero
{
name: "proportional scaling: 6 -> 0",
deployment: newDeployment("foo", 0, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 3, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 0, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up from zero
{
name: "proportional scaling: 0 -> 6",
deployment: newDeployment("foo", 6, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 0, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 6, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
},
// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
// update.
{
name: "failed rs update",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},
desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
},
{
name: "deployment with surge pods",
deployment: newDeployment("foo", 20, nil, maxSurge(2), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil),
newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 11, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
},
{
name: "change both surge and size",
deployment: newDeployment("foo", 50, nil, maxSurge(6), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil),
newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 22, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
},
{
name: "change both size and template",
deployment: updatedTemplate(14),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
},
}
for _, test := range tests {
_ = olderTimestamp
t.Log(test.name)
fake := fake.Clientset{}
dc := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
if test.newRS != nil {
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
if rs == nil {
continue
}
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
// Construct the nameToSize map that will hold all the sizes we got out of tests.
// Skip updating the map if the replica set wasn't updated since there will be
// no update action for it.
nameToSize := make(map[string]int32)
if test.newRS != nil {
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
continue
}
if len(test.expectedOld) != len(test.oldRSs) {
t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
continue
}
for n := range test.oldRSs {
rs := test.oldRSs[n]
expected := test.expectedOld[n]
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
}
}
}
}
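// TestDeploymentController_cleanupDeployment verifies that old replica sets beyond the
// deployment's revisionHistoryLimit are deleted, but only those that are fully scaled
// down (spec.replicas == status.replicas == 0).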
func TestDeploymentController_cleanupDeployment(t *testing.T) {
selector := map[string]string{"foo": "bar"}
tests := []struct {
oldRSs []*extensions.ReplicaSet
revisionHistoryLimit int32
expectedDeletions int
}{
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
newRSWithStatus("foo-3", 0, 0, selector),
},
revisionHistoryLimit: 1,
expectedDeletions: 2,
},
{
// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 1, selector),
newRSWithStatus("foo-3", 1, 0, selector),
newRSWithStatus("foo-4", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 1,
},
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 2,
},
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 1, 1, selector),
newRSWithStatus("foo-2", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
},
}
for i := range tests {
test := tests[i]
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
for _, rs := range test.oldRSs {
controller.rsLister.Indexer.Add(rs)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
controller.cleanupDeployment(test.oldRSs, d)
gotDeletions := 0
for _, action := range fake.Actions() {
if "delete" == action.GetVerb() {
gotDeletions++
}
}
if gotDeletions != test.expectedDeletions {
t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
continue
}
}
}

View file

@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["deployment_util.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/annotations:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/pod:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["deployment_util_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/intstr:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

File diff suppressed because it is too large