forked from barak/tarpoon

Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion


@@ -0,0 +1,18 @@
{
"Rules": [
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$",
"ForbiddenPrefixes": [
"k8s.io/kubernetes/pkg/client/unversioned"
]
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$",
"ForbiddenPrefixes": [
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
]
}
]
}

vendor/k8s.io/kubernetes/pkg/controller/BUILD (new file, generated, vendored)

@@ -0,0 +1,74 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"client_builder.go",
"controller_ref_manager.go",
"controller_utils.go",
"doc.go",
"lookup_cache.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/validation:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/hash:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/golang/groupcache/lru",
],
)
go_test(
name = "go_default_test",
srcs = ["controller_utils_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/util/uuid:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/controller/OWNERS (new file, generated, vendored)

@@ -0,0 +1,4 @@
assignees:
- bprashanth
- derekwaynecarr
- mikedanese


@@ -0,0 +1,42 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"controller.go",
"controller_utils.go",
"doc.go",
"groupapprove.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/certificates/v1alpha1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/cert:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/cloudflare/cfssl/config",
"//vendor:github.com/cloudflare/cfssl/signer",
"//vendor:github.com/cloudflare/cfssl/signer/local",
"//vendor:github.com/golang/glog",
],
)


@@ -0,0 +1,223 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/v1"
certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/signer"
"github.com/cloudflare/cfssl/signer/local"
"github.com/golang/glog"
)
type AutoApprover interface {
AutoApprove(csr *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error)
}
type CertificateController struct {
kubeClient clientset.Interface
// CSR framework and store
csrController *cache.Controller
csrStore cache.StoreToCertificateRequestLister
syncHandler func(csrKey string) error
approver AutoApprover
signer *local.Signer
queue workqueue.RateLimitingInterface
}
func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Duration, caCertFile, caKeyFile string, approver AutoApprover) (*CertificateController, error) {
// Send events to the apiserver
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
// Configure cfssl signer
// TODO: support non-default policy and remote/pkcs11 signing
policy := &config.Signing{
Default: config.DefaultConfig(),
}
ca, err := local.NewSignerFromFile(caCertFile, caKeyFile, policy)
if err != nil {
return nil, err
}
cc := &CertificateController{
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "certificate"),
signer: ca,
approver: approver,
}
// Manage the addition/update of certificate requests
cc.csrStore.Store, cc.csrController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options)
},
},
&certificates.CertificateSigningRequest{},
syncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
csr := obj.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Adding certificate request %s", csr.Name)
cc.enqueueCertificateRequest(obj)
},
UpdateFunc: func(old, new interface{}) {
oldCSR := old.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Updating certificate request %s", oldCSR.Name)
cc.enqueueCertificateRequest(new)
},
DeleteFunc: func(obj interface{}) {
csr, ok := obj.(*certificates.CertificateSigningRequest)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
return
}
csr, ok = tombstone.Obj.(*certificates.CertificateSigningRequest)
if !ok {
glog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj)
return
}
}
glog.V(4).Infof("Deleting certificate request %s", csr.Name)
cc.enqueueCertificateRequest(obj)
},
},
)
cc.syncHandler = cc.maybeSignCertificate
return cc, nil
}
// Run the main goroutine responsible for watching and syncing certificate requests.
func (cc *CertificateController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer cc.queue.ShutDown()
go cc.csrController.Run(stopCh)
glog.Infof("Starting certificate controller manager")
for i := 0; i < workers; i++ {
go wait.Until(cc.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down certificate controller")
}
// worker runs a thread that dequeues CSRs, handles them, and marks them done.
func (cc *CertificateController) worker() {
for cc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (cc *CertificateController) processNextWorkItem() bool {
cKey, quit := cc.queue.Get()
if quit {
return false
}
defer cc.queue.Done(cKey)
err := cc.syncHandler(cKey.(string))
if err == nil {
cc.queue.Forget(cKey)
return true
}
cc.queue.AddRateLimited(cKey)
utilruntime.HandleError(fmt.Errorf("Sync %v failed with: %v", cKey, err))
return true
}
func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
cc.queue.Add(key)
}
// maybeSignCertificate will inspect the certificate request and, if it has
// been approved and meets policy expectations, generate an X509 cert using the
// cluster CA assets. If successful, it will update the CSR status subresource
// with the signed certificate.
func (cc *CertificateController) maybeSignCertificate(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := cc.csrStore.Store.GetByKey(key)
if err != nil {
return err
}
if !exists {
glog.V(3).Infof("csr has been deleted: %v", key)
return nil
}
csr := obj.(*certificates.CertificateSigningRequest)
if cc.approver != nil {
csr, err = cc.approver.AutoApprove(csr)
if err != nil {
return fmt.Errorf("error auto approving csr: %v", err)
}
}
// At this point, the controller needs to:
// 1. Check the approval conditions
// 2. Generate a signed certificate
// 3. Update the Status subresource
if csr.Status.Certificate == nil && IsCertificateRequestApproved(csr) {
pemBytes := csr.Spec.Request
req := signer.SignRequest{Request: string(pemBytes)}
certBytes, err := cc.signer.Sign(req)
if err != nil {
return err
}
csr.Status.Certificate = certBytes
}
_, err = cc.kubeClient.Certificates().CertificateSigningRequests().UpdateStatus(csr)
return err
}
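
For orientation, here is a minimal sketch (not part of this commit) of wiring up the certificate controller defined above. It assumes the vendored import path k8s.io/kubernetes/pkg/controller/certificates; the API server host, CA file paths, resync period, and worker count are placeholders. With a nil approver, the controller only signs CSRs that something else has already approved.

package main

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller/certificates"
)

func main() {
	// Clientset against an API server; the host is a placeholder.
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "https://127.0.0.1:6443"})

	// With a nil approver, CSRs are only signed once something else approves them.
	cc, err := certificates.NewCertificateController(
		client,
		5*time.Minute,            // CSR informer resync period (illustrative)
		"/etc/kubernetes/ca.crt", // CA certificate used for signing (assumed path)
		"/etc/kubernetes/ca.key", // CA private key (assumed path)
		nil,
	)
	if err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	cc.Run(2, stop) // two workers; blocks until stop is closed
}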


@@ -0,0 +1,45 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
// IsCertificateRequestApproved returns true if a certificate request has the
// "Approved" condition and no "Denied" conditions; false otherwise.
func IsCertificateRequestApproved(csr *certificates.CertificateSigningRequest) bool {
approved, denied := getCertApprovalCondition(&csr.Status)
return approved && !denied
}
// IsCertificateRequestDenied returns true if a certificate request has the
// "Denied" conditions; false otherwise.
func IsCertificateRequestDenied(csr *certificates.CertificateSigningRequest) bool {
_, denied := getCertApprovalCondition(&csr.Status)
return denied
}
func getCertApprovalCondition(status *certificates.CertificateSigningRequestStatus) (approved bool, denied bool) {
for _, c := range status.Conditions {
if c.Type == certificates.CertificateApproved {
approved = true
}
if c.Type == certificates.CertificateDenied {
denied = true
}
}
return
}
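
As a small illustration of the helpers above (same assumed package path as before), the following checks a CSR that carries only an Approved condition:

package main

import (
	"fmt"

	certapi "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
	"k8s.io/kubernetes/pkg/controller/certificates"
)

func main() {
	// A CSR with an Approved condition and no Denied condition.
	csr := &certapi.CertificateSigningRequest{}
	csr.Status.Conditions = append(csr.Status.Conditions,
		certapi.CertificateSigningRequestCondition{Type: certapi.CertificateApproved})

	fmt.Println(certificates.IsCertificateRequestApproved(csr)) // true
	fmt.Println(certificates.IsCertificateRequestDenied(csr))   // false
}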


@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package certificates contains logic for watching and synchronizing
// CertificateSigningRequests.
package certificates


@@ -0,0 +1,86 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"fmt"
"reflect"
"strings"
certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
clientcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1"
certutil "k8s.io/kubernetes/pkg/util/cert"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)
// groupApprover implements AutoApprover for signing Kubelet certificates.
type groupApprover struct {
client clientcertificates.CertificateSigningRequestInterface
approveAllKubeletCSRsForGroup string
}
// NewGroupApprover creates an approver that accepts any CSR whose subject groups include approveAllKubeletCSRsForGroup.
func NewGroupApprover(client clientcertificates.CertificateSigningRequestInterface, approveAllKubeletCSRsForGroup string) AutoApprover {
return &groupApprover{
client: client,
approveAllKubeletCSRsForGroup: approveAllKubeletCSRsForGroup,
}
}
func (cc *groupApprover) AutoApprove(csr *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
// short-circuit if we're not auto-approving
if cc.approveAllKubeletCSRsForGroup == "" {
return csr, nil
}
// short-circuit if we're already approved or denied
if approved, denied := getCertApprovalCondition(&csr.Status); approved || denied {
return csr, nil
}
isKubeletBootstrapGroup := false
for _, g := range csr.Spec.Groups {
if g == cc.approveAllKubeletCSRsForGroup {
isKubeletBootstrapGroup = true
break
}
}
if !isKubeletBootstrapGroup {
return csr, nil
}
x509cr, err := certutil.ParseCSRV1alpha1(csr)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to parse csr %q: %v", csr.Name, err))
return csr, nil
}
if !reflect.DeepEqual([]string{"system:nodes"}, x509cr.Subject.Organization) {
return csr, nil
}
if !strings.HasPrefix(x509cr.Subject.CommonName, "system:node:") {
return csr, nil
}
if len(x509cr.DNSNames)+len(x509cr.EmailAddresses)+len(x509cr.IPAddresses) != 0 {
return csr, nil
}
csr.Status.Conditions = append(csr.Status.Conditions, certificates.CertificateSigningRequestCondition{
Type: certificates.CertificateApproved,
Reason: "AutoApproved",
Message: "Auto approving of all kubelet CSRs is enabled on the controller manager",
})
return cc.client.UpdateApproval(csr)
}
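
A hedged sketch of how the group approver above plugs into the certificate controller from earlier in this commit; the helper name is hypothetical and the import path is assumed:

package example

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/controller/certificates"
)

// newAutoApprovingController builds the group approver and hands it to the
// certificate controller, so CSRs from the given bootstrap group are
// auto-approved and then signed.
func newAutoApprovingController(client clientset.Interface, caCert, caKey, group string) (*certificates.CertificateController, error) {
	approver := certificates.NewGroupApprover(client.Certificates().CertificateSigningRequests(), group)
	return certificates.NewCertificateController(client, 5*time.Minute, caCert, caKey, approver)
}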


@@ -0,0 +1,184 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// ControllerClientBuilder allows you to get clients and configs for controllers
type ControllerClientBuilder interface {
Config(name string) (*restclient.Config, error)
ConfigOrDie(name string) *restclient.Config
Client(name string) (clientset.Interface, error)
ClientOrDie(name string) clientset.Interface
}
// SimpleControllerClientBuilder returns a fixed client with different user agents
type SimpleControllerClientBuilder struct {
// ClientConfig is a skeleton config to clone and use as the basis for each controller client
ClientConfig *restclient.Config
}
func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) {
clientConfig := *b.ClientConfig
return restclient.AddUserAgent(&clientConfig, name), nil
}
func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
clientConfig, err := b.Config(name)
if err != nil {
glog.Fatal(err)
}
return clientConfig
}
func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
client, err := b.Client(name)
if err != nil {
glog.Fatal(err)
}
return client
}
// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as
// service accounts
type SAControllerClientBuilder struct {
// ClientConfig is a skeleton config to clone and use as the basis for each controller client
ClientConfig *restclient.Config
// CoreClient is used to provision service accounts if needed and watch for their associated tokens
// to construct a controller client
CoreClient v1core.CoreV1Interface
// Namespace is the namespace used to host the service accounts that will back the
// controllers. It must be a highly privileged namespace that normal users cannot inspect.
Namespace string
}
// Config returns a complete clientConfig for constructing clients. This is kept separate in
// anticipation of composition, which means that not all clientsets are known here.
func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) {
clientConfig := restclient.AnonymousClientConfig(b.ClientConfig)
// we need the SA UID to find a matching SA token
sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name)
if err != nil && !apierrors.IsNotFound(err) {
return nil, err
} else if apierrors.IsNotFound(err) {
// check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
// It'll probably fail, but perhaps that will have a better message.
if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) {
_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: b.Namespace}})
if err != nil && !apierrors.IsAlreadyExists(err) {
return nil, err
}
}
sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: b.Namespace, Name: name}})
if err != nil {
return nil, err
}
}
lw := &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).Watch(options)
},
}
_, err = cache.ListWatchUntil(30*time.Second, lw,
func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, nil
case watch.Error:
return false, fmt.Errorf("error watching")
case watch.Added, watch.Modified:
secret := event.Object.(*v1.Secret)
if !serviceaccount.IsServiceAccountToken(secret, sa) ||
len(secret.Data[v1.ServiceAccountTokenKey]) == 0 {
return false, nil
}
// TODO maybe verify the token is valid
clientConfig.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name))
return true, nil
default:
return false, fmt.Errorf("unexpected event type: %v", event.Type)
}
})
if err != nil {
return nil, fmt.Errorf("unable to get token for service account: %v", err)
}
return clientConfig, nil
}
func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
clientConfig, err := b.Config(name)
if err != nil {
glog.Fatal(err)
}
return clientConfig
}
func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
client, err := b.Client(name)
if err != nil {
glog.Fatal(err)
}
return client
}
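
To illustrate the builders above, a minimal sketch (placeholder host and controller name) that derives a per-controller config and client from one shared base config:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	// One shared base config; the host is a placeholder.
	base := &restclient.Config{Host: "https://127.0.0.1:6443"}

	builder := controller.SimpleControllerClientBuilder{ClientConfig: base}

	// Each named client clones the base config and gets a user agent
	// that identifies the controller.
	cfg := builder.ConfigOrDie("node-controller")
	client := builder.ClientOrDie("node-controller")

	fmt.Println(cfg.UserAgent) // contains "node-controller"
	_ = client                 // clientset.Interface for that controller
}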


@@ -0,0 +1,144 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime/schema"
)
type PodControllerRefManager struct {
podControl PodControlInterface
controllerObject v1.ObjectMeta
controllerSelector labels.Selector
controllerKind schema.GroupVersionKind
}
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
func NewPodControllerRefManager(
podControl PodControlInterface,
controllerObject v1.ObjectMeta,
controllerSelector labels.Selector,
controllerKind schema.GroupVersionKind,
) *PodControllerRefManager {
return &PodControllerRefManager{podControl, controllerObject, controllerSelector, controllerKind}
}
// Classify first filters out inactive pods, then it classifies the remaining pods
// into three categories:
//  1. matchesAndControlled are the pods whose labels match the selector of the
//     RC and have a controllerRef pointing to the controller.
//  2. matchesNeedsController are the pods whose labels match the RC but don't
//     have a controllerRef. (Pods with matching labels but with a controllerRef
//     pointing to another object are ignored.)
//  3. controlledDoesNotMatch are the pods that have a controllerRef pointing to
//     the controller, but whose labels no longer match the selector.
func (m *PodControllerRefManager) Classify(pods []*v1.Pod) (
matchesAndControlled []*v1.Pod,
matchesNeedsController []*v1.Pod,
controlledDoesNotMatch []*v1.Pod) {
for i := range pods {
pod := pods[i]
if !IsPodActive(pod) {
glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
pod.Namespace, pod.Name, pod.Status.Phase, pod.DeletionTimestamp)
continue
}
controllerRef := getControllerOf(pod.ObjectMeta)
if controllerRef != nil {
if controllerRef.UID == m.controllerObject.UID {
// already controlled
if m.controllerSelector.Matches(labels.Set(pod.Labels)) {
matchesAndControlled = append(matchesAndControlled, pod)
} else {
controlledDoesNotMatch = append(controlledDoesNotMatch, pod)
}
} else {
// ignoring the pod controlled by other controller
glog.V(4).Infof("Ignoring pod %v/%v, it's owned by [%s/%s, name: %s, uid: %s]",
pod.Namespace, pod.Name, controllerRef.APIVersion, controllerRef.Kind, controllerRef.Name, controllerRef.UID)
continue
}
} else {
if !m.controllerSelector.Matches(labels.Set(pod.Labels)) {
continue
}
matchesNeedsController = append(matchesNeedsController, pod)
}
}
return matchesAndControlled, matchesNeedsController, controlledDoesNotMatch
}
// getControllerOf returns the controllerRef if controllee has a controller,
// otherwise returns nil.
func getControllerOf(controllee v1.ObjectMeta) *v1.OwnerReference {
for _, owner := range controllee.OwnerReferences {
// controlled by other controller
if owner.Controller != nil && *owner.Controller == true {
return &owner
}
}
return nil
}
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
// we should not adopt any pods if the controller is about to be deleted
if m.controllerObject.DeletionTimestamp != nil {
return fmt.Errorf("cancel the adopt attempt for pod %s because the controller is being deleted",
strings.Join([]string{pod.Namespace, pod.Name, string(pod.UID)}, "_"))
}
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.controllerObject.Name, m.controllerObject.UID, pod.UID)
return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch))
}
// ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name)
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, pod.UID)
err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the pod no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the pod
// has no owner reference, 2. the uid of the pod doesn't
// match, which means the pod is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the pod has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
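
A hypothetical caller of the ref manager above, sketching the adopt/release flow it enables. The function name and parameters are illustrative, and PodControlInterface comes from the package's controller utilities (the large diff suppressed below):

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime/schema"
)

// adoptAndRelease adopts pods that match the selector but lack a controllerRef,
// and releases pods the controller owns whose labels no longer match.
func adoptAndRelease(podControl controller.PodControlInterface, rcMeta v1.ObjectMeta, selector labels.Selector, pods []*v1.Pod) error {
	manager := controller.NewPodControllerRefManager(
		podControl,
		rcMeta,
		selector,
		schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"},
	)

	matched, needAdoption, needRelease := manager.Classify(pods)
	_ = matched // already correctly owned; nothing to do

	for _, pod := range needAdoption {
		if err := manager.AdoptPod(pod); err != nil {
			return err
		}
	}
	for _, pod := range needRelease {
		if err := manager.ReleasePod(pod); err != nil {
			return err
		}
	}
	return nil
}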

File diff suppressed because it is too large.


@@ -0,0 +1,383 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"encoding/json"
"fmt"
"math/rand"
"net/http/httptest"
"reflect"
"sort"
"sync"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/util/clock"
"k8s.io/kubernetes/pkg/util/sets"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/util/uuid"
)
// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) {
fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
fakeClock := clock.NewFakeClock(fakeTime)
ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
ttlStore := cache.NewFakeExpirationStore(
ExpKeyFunc, nil, ttlPolicy, fakeClock)
return &ControllerExpectations{ttlStore}, fakeClock
}
func newReplicationController(replicas int) *v1.ReplicationController {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: v1.ReplicationControllerSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
},
},
},
}
return rc
}
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController) *v1.PodList {
pods := []v1.Pod{}
for i := 0; i < count; i++ {
newPod := v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod%d", i),
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,
},
Status: v1.PodStatus{Phase: status},
}
if store != nil {
store.Add(&newPod)
}
pods = append(pods, newPod)
}
return &v1.PodList{
Items: pods,
}
}
func TestControllerExpectations(t *testing.T) {
ttl := 30 * time.Second
e, fakeClock := NewFakeControllerExpectationsLookup(ttl)
// In practice we can't really have add and delete expectations since we only either create or
// delete replicas in one rc pass, and the rc goes to sleep soon after until the expectations are
// either fulfilled or time out.
adds, dels := 10, 30
rc := newReplicationController(1)
// RC fires off adds and deletes at apiserver, then sets expectations
rcKey, err := KeyFunc(rc)
if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", rc, err)
}
e.SetExpectations(rcKey, adds, dels)
var wg sync.WaitGroup
for i := 0; i < adds+1; i++ {
wg.Add(1)
go func() {
// In prod this can happen either because of a failed create by the rc
// or after having observed a create via informer
e.CreationObserved(rcKey)
wg.Done()
}()
}
wg.Wait()
// There are still delete expectations
if e.SatisfiedExpectations(rcKey) {
t.Errorf("Rc will sync before expectations are met")
}
for i := 0; i < dels+1; i++ {
wg.Add(1)
go func() {
e.DeletionObserved(rcKey)
wg.Done()
}()
}
wg.Wait()
// Expectations have been surpassed
if podExp, exists, err := e.GetExpectations(rcKey); err == nil && exists {
add, del := podExp.GetExpectations()
if add != -1 || del != -1 {
t.Errorf("Unexpected pod expectations %#v", podExp)
}
} else {
t.Errorf("Could not get expectations for rc, exists %v and err %v", exists, err)
}
if !e.SatisfiedExpectations(rcKey) {
t.Errorf("Expectations are met but the rc will not sync")
}
// Next round of rc sync, old expectations are cleared
e.SetExpectations(rcKey, 1, 2)
if podExp, exists, err := e.GetExpectations(rcKey); err == nil && exists {
add, del := podExp.GetExpectations()
if add != 1 || del != 2 {
t.Errorf("Unexpected pod expectations %#v", podExp)
}
} else {
t.Errorf("Could not get expectations for rc, exists %v and err %v", exists, err)
}
// Expectations have expired because of ttl
fakeClock.Step(ttl + 1)
if !e.SatisfiedExpectations(rcKey) {
t.Errorf("Expectations should have expired but didn't")
}
}
func TestUIDExpectations(t *testing.T) {
uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
rcList := []*v1.ReplicationController{
newReplicationController(2),
newReplicationController(1),
newReplicationController(0),
newReplicationController(5),
}
rcToPods := map[string][]string{}
rcKeys := []string{}
for i := range rcList {
rc := rcList[i]
rcName := fmt.Sprintf("rc-%v", i)
rc.Name = rcName
rc.Spec.Selector[rcName] = rcName
podList := newPodList(nil, 5, v1.PodRunning, rc)
rcKey, err := KeyFunc(rc)
if err != nil {
t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
}
rcKeys = append(rcKeys, rcKey)
rcPodNames := []string{}
for i := range podList.Items {
p := &podList.Items[i]
p.Name = fmt.Sprintf("%v-%v", p.Name, rc.Name)
rcPodNames = append(rcPodNames, PodKey(p))
}
rcToPods[rcKey] = rcPodNames
uidExp.ExpectDeletions(rcKey, rcPodNames)
}
for i := range rcKeys {
j := rand.Intn(i + 1)
rcKeys[i], rcKeys[j] = rcKeys[j], rcKeys[i]
}
for _, rcKey := range rcKeys {
if uidExp.SatisfiedExpectations(rcKey) {
t.Errorf("Controller %v satisfied expectations before deletion", rcKey)
}
for _, p := range rcToPods[rcKey] {
uidExp.DeletionObserved(rcKey, p)
}
if !uidExp.SatisfiedExpectations(rcKey) {
t.Errorf("Controller %v didn't satisfy expectations after deletion", rcKey)
}
uidExp.DeleteExpectations(rcKey)
if uidExp.GetUIDs(rcKey) != nil {
t.Errorf("Failed to delete uid expectations for %v", rcKey)
}
}
}
func TestCreatePods(t *testing.T) {
ns := v1.NamespaceDefault
body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(body),
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
podControl := RealPodControl{
KubeClient: clientset,
Recorder: &record.FakeRecorder{},
}
controllerSpec := newReplicationController(1)
// Make sure createReplica sends a POST to the apiserver with a pod from the controller's pod template
if err := podControl.CreatePods(ns, controllerSpec.Spec.Template, controllerSpec); err != nil {
t.Fatalf("unexpected error: %v", err)
}
expectedPod := v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: controllerSpec.Spec.Template.Labels,
GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
},
Spec: controllerSpec.Spec.Template.Spec,
}
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", v1.NamespaceDefault, ""), "POST", nil)
var actualPod = &v1.Pod{}
err := json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !api.Semantic.DeepDerivative(&expectedPod, actualPod) {
t.Logf("Body: %s", fakeHandler.RequestBody)
t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod)
}
}
func TestActivePodFiltering(t *testing.T) {
// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
rc := newReplicationController(0)
podList := newPodList(nil, 5, v1.PodRunning, rc)
podList.Items[0].Status.Phase = v1.PodSucceeded
podList.Items[1].Status.Phase = v1.PodFailed
expectedNames := sets.NewString()
for _, pod := range podList.Items[2:] {
expectedNames.Insert(pod.Name)
}
var podPointers []*v1.Pod
for i := range podList.Items {
podPointers = append(podPointers, &podList.Items[i])
}
got := FilterActivePods(podPointers)
gotNames := sets.NewString()
for _, pod := range got {
gotNames.Insert(pod.Name)
}
if expectedNames.Difference(gotNames).Len() != 0 || gotNames.Difference(expectedNames).Len() != 0 {
t.Errorf("expected %v, got %v", expectedNames.List(), gotNames.List())
}
}
func TestSortingActivePods(t *testing.T) {
numPods := 9
// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
rc := newReplicationController(0)
podList := newPodList(nil, numPods, v1.PodRunning, rc)
pods := make([]*v1.Pod, len(podList.Items))
for i := range podList.Items {
pods[i] = &podList.Items[i]
}
// pods[0] is not scheduled yet.
pods[0].Spec.NodeName = ""
pods[0].Status.Phase = v1.PodPending
// pods[1] is scheduled but pending.
pods[1].Spec.NodeName = "bar"
pods[1].Status.Phase = v1.PodPending
// pods[2] is unknown.
pods[2].Spec.NodeName = "foo"
pods[2].Status.Phase = v1.PodUnknown
// pods[3] is running but not ready.
pods[3].Spec.NodeName = "foo"
pods[3].Status.Phase = v1.PodRunning
// pods[4] is running and ready but without LastTransitionTime.
now := metav1.Now()
pods[4].Spec.NodeName = "foo"
pods[4].Status.Phase = v1.PodRunning
pods[4].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
pods[4].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[5] is running and ready and with LastTransitionTime.
pods[5].Spec.NodeName = "foo"
pods[5].Status.Phase = v1.PodRunning
pods[5].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}}
pods[5].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[6] is running ready for a longer time than pods[5].
then := metav1.Time{Time: now.AddDate(0, -1, 0)}
pods[6].Spec.NodeName = "foo"
pods[6].Status.Phase = v1.PodRunning
pods[6].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[6].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[7] has lower container restart count than pods[6].
pods[7].Spec.NodeName = "foo"
pods[7].Status.Phase = v1.PodRunning
pods[7].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[7].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[7].CreationTimestamp = now
// pods[8] is older than pods[7].
pods[8].Spec.NodeName = "foo"
pods[8].Status.Phase = v1.PodRunning
pods[8].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[8].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[8].CreationTimestamp = then
getOrder := func(pods []*v1.Pod) []string {
names := make([]string, len(pods))
for i := range pods {
names[i] = pods[i].Name
}
return names
}
expected := getOrder(pods)
for i := 0; i < 20; i++ {
idx := rand.Perm(numPods)
randomizedPods := make([]*v1.Pod, numPods)
for j := 0; j < numPods; j++ {
randomizedPods[j] = pods[idx[j]]
}
sort.Sort(ActivePods(randomizedPods))
actual := getOrder(randomizedPods)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("expected %v, got %v", expected, actual)
}
}
}

vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD (new file, generated, vendored)

@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"controller.go",
"doc.go",
"injection.go",
"utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/robfig/cron",
],
)
go_test(
name = "go_default_test",
srcs = [
"controller_test.go",
"utils_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/types:go_default_library",
],
)


@@ -0,0 +1,297 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
/*
I did not use watch or expectations. Those add a lot of corner cases, and we aren't
expecting a large volume of jobs or scheduledJobs. (We are favoring correctness
over scalability. If we find a single controller thread is too slow because
there are a lot of Jobs or CronJobs, we can parallelize by Namespace.
If we find the load on the API server is too high, we can use a watch and
UndeltaStore.)
Just periodically list jobs and SJs, and then reconcile them.
*/
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
)
// Utilities for dealing with Jobs and CronJobs and time.
type CronJobController struct {
kubeClient clientset.Interface
jobControl jobControlInterface
sjControl sjControlInterface
podControl podControlInterface
recorder record.EventRecorder
}
func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
jm := &CronJobController{
kubeClient: kubeClient,
jobControl: realJobControl{KubeClient: kubeClient},
sjControl: &realSJControl{KubeClient: kubeClient},
podControl: &realPodControl{KubeClient: kubeClient},
recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "cronjob-controller"}),
}
return jm
}
func NewCronJobControllerFromClient(kubeClient clientset.Interface) *CronJobController {
jm := NewCronJobController(kubeClient)
return jm
}
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *CronJobController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CronJob Manager")
// Check things every 10 seconds.
go wait.Until(jm.SyncAll, 10*time.Second, stopCh)
<-stopCh
glog.Infof("Shutting down CronJob Manager")
}
// SyncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) SyncAll() {
sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil {
glog.Errorf("Error listing cronjobs: %v", err)
return
}
sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs))
jl, err := jm.kubeClient.BatchV2alpha1().Jobs(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil {
glog.Errorf("Error listing jobs: %v", err)
return
}
js := jl.Items
glog.V(4).Infof("Found %d jobs", len(js))
jobsBySj := groupJobsByParent(sjs, js)
glog.V(4).Infof("Found %d groups", len(jobsBySj))
for _, sj := range sjs {
SyncOne(sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
}
}
// SyncOne reconciles a CronJob with a list of any Jobs that it created.
// All known jobs created by "sj" should be included in "js".
// The current time is passed in to facilitate testing.
// It has no receiver, to facilitate testing.
func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
for i := range js {
j := js[i]
found := inActiveList(sj, j.ObjectMeta.UID)
if !found && !IsJobFinished(&j) {
recorder.Eventf(&sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
// We found an unfinished job that has us as the parent, but it is not in our Active list.
// This could happen if we crashed right after creating the Job and before updating the status,
// or if our jobs list is newer than our sj status after a relist, or if someone intentionally created
// a job that they wanted us to adopt.
// TODO: maybe handle the adoption case? Concurrency/suspend rules will not apply in that case, obviously, since we can't
// stop users from creating jobs if they have permission. It is assumed that if a
// user has permission to create a job within a namespace, then they have permission to make any scheduledJob
// in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
// TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
} else if found && IsJobFinished(&j) {
deleteFromActiveList(&sj, j.ObjectMeta.UID)
// TODO: event to call out failure vs success.
recorder.Eventf(&sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
}
}
updatedSJ, err := sjc.UpdateStatus(&sj)
if err != nil {
glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
sj = *updatedSJ
if sj.Spec.Suspend != nil && *sj.Spec.Suspend {
glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
return
}
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
}
// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.
if len(times) == 0 {
glog.V(4).Infof("No unmet start times for %s", nameForLog)
return
}
if len(times) > 1 {
glog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog)
}
scheduledTime := times[len(times)-1]
tooLate := false
if sj.Spec.StartingDeadlineSeconds != nil {
tooLate = scheduledTime.Add(time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds)).Before(now)
}
if tooLate {
glog.V(4).Infof("Missed starting window for %s", nameForLog)
// TODO: generate an event for a miss. Use a warning level event because it indicates a
// problem with the controller (restart or long queue), and is not expected by user either.
// Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
// the miss every cycle. In order to avoid sending multiple events, and to avoid processing
// the sj again and again, we could set a Status.LastMissedTime when we notice a miss.
// Then, when we call getRecentUnmetScheduleTimes, we can take max(creationTimestamp,
// Status.LastScheduleTime, Status.LastMissedTime), so that we won't generate
// an event the next time we process it, and so that the user looking at the status
// can easily see that there was a missed execution.
return
}
if sj.Spec.ConcurrencyPolicy == batch.ForbidConcurrent && len(sj.Status.Active) > 0 {
// Regardless which source of information we use for the set of active jobs,
// there is some risk that we won't see an active job when there is one.
// (because we haven't seen the status update to the SJ or the created pod).
// So it is theoretically possible to have concurrency with Forbid.
// As long as the invocations are "far enough apart in time", this usually won't happen.
//
// TODO: for Forbid, we could use the same name for every execution, as a lock.
// With replace, we could use a name that is deterministic per execution time.
// But that would mean that you could not inspect prior successes or failures of Forbid jobs.
glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
return
}
if sj.Spec.ConcurrencyPolicy == batch.ReplaceConcurrent {
for _, j := range sj.Status.Active {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
return
}
// scale job down to 0
if *job.Spec.Parallelism != 0 {
zero := int32(0)
job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return
}
}
// remove all pods...
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) != 0 {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
return
}
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return
}
// ... and its reference from active list
deleteFromActiveList(&sj, job.ObjectMeta.UID)
recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name)
}
}
jobReq, err := getJobFromTemplate(&sj, scheduledTime)
if err != nil {
glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
return
}
jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return
}
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
// ------------------------------------------------------------------ //
// If this process restarts at this point (after posting a job, but
// before updating the status), then we might try to start the job on
// the next time. Actually, if we relist the SJs and Jobs on the next
// iteration of SyncAll, we might not see our own status update, and
// then post one again. So, we need to use the job name as a lock to
// prevent us from making the job twice (name the job with hash of its
// scheduled time).
// Add the just-started job to the status list.
ref, err := getRef(jobResp)
if err != nil {
glog.V(2).Infof("Unable to make object reference for job for %s", nameForLog)
} else {
sj.Status.Active = append(sj.Status.Active, *ref)
}
sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime}
if _, err := sjc.UpdateStatus(&sj); err != nil {
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
return
}
func getRef(object runtime.Object) (*v1.ObjectReference, error) {
return v1.GetReference(object)
}
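
Finally, a minimal sketch (placeholder host; import path taken from the cronjob BUILD file above) of running the CronJob controller, which follows the list-and-reconcile design described in the package comment:

package main

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller/cronjob"
)

func main() {
	// Clientset against an API server; the host is a placeholder.
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "https://127.0.0.1:6443"})

	jm := cronjob.NewCronJobController(client)

	stop := make(chan struct{})
	jm.Run(stop) // blocks; SyncAll relists and reconciles every 10 seconds until stop is closed
}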


@@ -0,0 +1,416 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types"
)
// schedule is hourly on the hour
var (
onTheHour string = "0 * * * ?"
)
func justBeforeTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func topOfTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justBeforeThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T08:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
// returns a cronJob with some fields filled in.
func cronJob() batch.CronJob {
return batch.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v2alpha1/namespaces/snazzycats/cronjobs/mycronjob",
CreationTimestamp: metav1.Time{Time: justBeforeTheHour()},
},
Spec: batch.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: jobSpec(),
},
},
}
}
func jobSpec() batch.JobSpec {
one := int32(1)
return batch.JobSpec{
Parallelism: &one,
Completions: &one,
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
}
}
func newJob(UID string) batch.Job {
return batch.Job{
ObjectMeta: v1.ObjectMeta{
UID: types.UID(UID),
Name: "foobar",
Namespace: v1.NamespaceDefault,
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
},
Spec: jobSpec(),
}
}
var (
shortDead int64 = 10
longDead int64 = 1000000
noDead int64 = -12345
A batch.ConcurrencyPolicy = batch.AllowConcurrent
f batch.ConcurrencyPolicy = batch.ForbidConcurrent
R batch.ConcurrencyPolicy = batch.ReplaceConcurrent
T bool = true
F bool = false
)
func TestSyncOne_RunOrNot(t *testing.T) {
testCases := map[string]struct {
// sj spec
concurrencyPolicy batch.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
stillActive bool
// environment
now time.Time
// expectations
expectCreate bool
expectDelete bool
expectActive int
}{
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, 0},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, 0},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), T, F, 1},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
"prev ran but done, not time, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
"prev ran but done, not time, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, 0},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, 0},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, 1},
"still active, not time, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
"still active, not time, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
"still active, not time, R": {R, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
"still active, is time, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, 2},
"still active, is time, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1},
"still active, is time, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, 1},
"still active, is time, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1},
"still active, is time, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, 1},
"still active, is time, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, 2},
}
for name, tc := range testCases {
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
var (
job *batch.Job
err error
)
js := []batch.Job{}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
if err != nil {
t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
}
job.UID = "1234"
job.Namespace = ""
if tc.stillActive {
sj.Status.Active = []v1.ObjectReference{{UID: job.UID}}
js = append(js, *job)
}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
if tc.stillActive {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
}
jc := &fakeJobControl{Job: job}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
SyncOne(sj, js, tc.now, jc, sjc, pc, recorder)
expectedCreates := 0
if tc.expectCreate {
expectedCreates = 1
}
if len(jc.Jobs) != expectedCreates {
t.Errorf("%s: expected %d job started, actually %v", name, expectedCreates, len(jc.Jobs))
}
expectedDeletes := 0
if tc.expectDelete {
expectedDeletes = 1
}
if len(jc.DeleteJobName) != expectedDeletes {
t.Errorf("%s: expected %d job deleted, actually %v", name, expectedDeletes, len(jc.DeleteJobName))
}
// The status is updated once while ranging through the job list, and again if a job is created.
expectUpdates := 1
expectedEvents := 0
if tc.expectCreate {
expectedEvents++
expectUpdates++
}
if tc.expectDelete {
expectedEvents++
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
if tc.expectActive != len(sjc.Updates[expectUpdates-1].Status.Active) {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, len(sjc.Updates[expectUpdates-1].Status.Active))
}
}
}
// TODO: simulation where the controller randomly doesn't run, and randomly has errors starting jobs or deleting jobs,
// but over time, all jobs run as expected (assuming Allow and no deadline).
// TestSyncOne_Status tests sj.UpdateStatus in SyncOne
func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1")
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue})
unexpectedJob := newJob("2")
testCases := map[string]struct {
// sj spec
concurrencyPolicy batch.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
hasFinishedJob bool
// environment
now time.Time
hasUnexpectedJob bool
// expectations
expectCreate bool
expectDelete bool
}{
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), F, T, F},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), F, T, F},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), F, T, F},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, F},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), F, T, F},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, F},
"prev ran but done, not time, finished job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F},
"prev ran but done, not time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F},
"prev ran but done, not time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), T, F, F},
"prev ran but done, not time, finished job, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F},
"prev ran but done, not time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), F, T, F},
"prev ran but done, is time, finished job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), F, T, F},
"prev ran but done, is time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, T, F},
"prev ran but done, is time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, F},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), F, T, F},
"prev ran but done, is time, finished job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, T, F},
"prev ran but done, is time, unexpected job, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, T, F},
"prev ran but done, is time, finished job, unexpected job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, F},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), F, T, F},
"prev ran but done, is time, finished job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), F, T, F},
"prev ran but done, is time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, T, F},
"prev ran but done, is time, finished job, unexpected job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, F},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F},
"prev ran but done, is time, finished job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F},
"prev ran but done, is time, unexpected job, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F},
"prev ran but done, is time, finished job, unexpected job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, F},
"prev ran but done, is time, finished job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, F},
"prev ran but done, is time, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), T, F, F},
"prev ran but done, is time, finished job, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), T, F, F},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), F, T, F},
"prev ran but done, is time, finished job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), F, T, F},
"prev ran but done, is time, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, T, F},
"prev ran but done, is time, finished job, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, T, F},
}
for name, tc := range testCases {
// Setup the test
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
} else {
if tc.hasFinishedJob || tc.hasUnexpectedJob {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
jobs := []batch.Job{}
if tc.hasFinishedJob {
ref, err := getRef(&finishedJob)
if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
}
sj.Status.Active = []v1.ObjectReference{*ref}
jobs = append(jobs, finishedJob)
}
if tc.hasUnexpectedJob {
jobs = append(jobs, unexpectedJob)
}
jc := &fakeJobControl{}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
// Run the code
SyncOne(sj, jobs, tc.now, jc, sjc, pc, recorder)
// The status is updated once while ranging through the job list, and again if a job is created.
expectUpdates := 1
// Events happen when there are unexpected or finished jobs, and upon job creation / deletion.
expectedEvents := 0
if tc.expectCreate {
expectUpdates++
expectedEvents++
}
if tc.expectDelete {
expectedEvents++
}
if tc.hasFinishedJob {
expectedEvents++
}
if tc.hasUnexpectedJob {
expectedEvents++
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v: %#v", name, expectedEvents, len(recorder.Events), recorder.Events)
}
if expectUpdates != len(sjc.Updates) {
t.Errorf("%s: expected %d status updates, actually %d", name, expectUpdates, len(sjc.Updates))
}
if tc.hasFinishedJob && inActiveList(sjc.Updates[0], finishedJob.UID) {
t.Errorf("%s: expected finished job removed from active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.hasUnexpectedJob && inActiveList(sjc.Updates[0], unexpectedJob.UID) {
t.Errorf("%s: expected unexpected job not added to active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.expectCreate && !sjc.Updates[1].Status.LastScheduleTime.Time.Equal(topOfTheHour()) {
t.Errorf("%s: expected LastScheduleTime updated to %s, got %s", name, topOfTheHour(), sjc.Updates[1].Status.LastScheduleTime)
}
}
}

18
vendor/k8s.io/kubernetes/pkg/controller/cronjob/doc.go generated vendored Normal file
View file

@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cronjob contains the controller for CronJob objects.
package cronjob

View file

@ -0,0 +1,224 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/labels"
)
// sjControlInterface is an interface that knows how to update CronJob status.
// It is defined as an interface to allow testing.
type sjControlInterface interface {
UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error)
}
// realSJControl is the default implementation of sjControlInterface.
type realSJControl struct {
KubeClient clientset.Interface
}
var _ sjControlInterface = &realSJControl{}
func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
return c.KubeClient.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj)
}
// fakeSJControl is a fake implementation of sjControlInterface used in tests.
type fakeSJControl struct {
Updates []batch.CronJob
}
var _ sjControlInterface = &fakeSJControl{}
func (c *fakeSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
c.Updates = append(c.Updates, *sj)
return sj, nil
}
// ------------------------------------------------------------------ //
// jobControlInterface is an interface that knows how to add or delete jobs.
// It is defined as an interface to allow testing.
type jobControlInterface interface {
// GetJob retrieves a job
GetJob(namespace, name string) (*batch.Job, error)
// CreateJob creates new jobs according to the spec
CreateJob(namespace string, job *batch.Job) (*batch.Job, error)
// UpdateJob updates a job
UpdateJob(namespace string, job *batch.Job) (*batch.Job, error)
// DeleteJob deletes the job identified by name.
// TODO: delete by UID?
DeleteJob(namespace string, name string) error
}
// realJobControl is the default implementation of jobControlInterface.
type realJobControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ jobControlInterface = &realJobControl{}
func copyLabels(template *batch.JobTemplateSpec) labels.Set {
l := make(labels.Set)
for k, v := range template.Labels {
l[k] = v
}
return l
}
func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
a := make(labels.Set)
for k, v := range template.Annotations {
a[k] = v
}
return a
}
func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) {
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name)
}
func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Update(job)
}
func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Create(job)
}
func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Delete(name, nil)
}
type fakeJobControl struct {
sync.Mutex
Job *batch.Job
Jobs []batch.Job
DeleteJobName []string
Err error
}
var _ jobControlInterface = &fakeJobControl{}
func (f *fakeJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
job.SelfLink = fmt.Sprintf("/api/batch/v1/namespaces/%s/jobs/%s", namespace, job.Name)
f.Jobs = append(f.Jobs, *job)
job.UID = "test-uid"
return job, nil
}
func (f *fakeJobControl) GetJob(namespace, name string) (*batch.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return f.Job, nil
}
func (f *fakeJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return job, nil
}
func (f *fakeJobControl) DeleteJob(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeleteJobName = append(f.DeleteJobName, name)
return nil
}
func (f *fakeJobControl) Clear() {
f.Lock()
defer f.Unlock()
f.DeleteJobName = []string{}
f.Jobs = []batch.Job{}
f.Err = nil
}
// ------------------------------------------------------------------ //
// podControlInterface is an interface that knows how to list or delete pods.
// It is defined as an interface to allow testing.
type podControlInterface interface {
// ListPods lists pods.
ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error)
// DeletePod deletes the pod identified by name.
// TODO: delete by UID?
DeletePod(namespace string, name string) error
}
// realPodControl is the default implementation of podControlInterface.
type realPodControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ podControlInterface = &realPodControl{}
func (r realPodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
return r.KubeClient.Core().Pods(namespace).List(opts)
}
func (r realPodControl) DeletePod(namespace string, name string) error {
return r.KubeClient.Core().Pods(namespace).Delete(name, nil)
}
type fakePodControl struct {
sync.Mutex
Pods []v1.Pod
DeletePodName []string
Err error
}
var _ podControlInterface = &fakePodControl{}
func (f *fakePodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
f.Lock()
defer f.Unlock()
return &v1.PodList{Items: f.Pods}, nil
}
func (f *fakePodControl) DeletePod(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeletePodName = append(f.DeletePodName, name)
return nil
}
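// The fake implementations above exist so tests can drive the sync logic
// without an API server. A minimal usage sketch (the function below is
// illustrative only and not part of the original file; it relies solely on
// identifiers already imported here, and the job value is made up):
func exampleFakeJobControlUsage() {
	fc := &fakeJobControl{}
	job := &batch.Job{ObjectMeta: v1.ObjectMeta{Name: "demo"}}
	// CreateJob records the job in fc.Jobs instead of calling the API server.
	if _, err := fc.CreateJob("default", job); err != nil {
		fmt.Printf("unexpected error: %v\n", err)
	}
	// DeleteJob records the name in fc.DeleteJobName.
	if err := fc.DeleteJob("default", "demo"); err != nil {
		fmt.Printf("unexpected error: %v\n", err)
	}
	fmt.Println(len(fc.Jobs), len(fc.DeleteJobName)) // 1 1
}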

View file

@ -0,0 +1,235 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"encoding/json"
"fmt"
"time"
"github.com/golang/glog"
"github.com/robfig/cron"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/types"
)
// Utilities for dealing with Jobs and CronJobs and time.
func inActiveList(sj batch.CronJob, uid types.UID) bool {
for _, j := range sj.Status.Active {
if j.UID == uid {
return true
}
}
return false
}
func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
if sj == nil {
return
}
newActive := []v1.ObjectReference{}
for _, j := range sj.Status.Active {
if j.UID != uid {
newActive = append(newActive, j)
}
}
sj.Status.Active = newActive
}
// getParentUIDFromJob extracts the UID of the job's parent and reports whether it was found.
func getParentUIDFromJob(j batch.Job) (types.UID, bool) {
creatorRefJson, found := j.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !found {
glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
var sr v1.SerializedReference
err := json.Unmarshal([]byte(creatorRefJson), &sr)
if err != nil {
glog.V(4).Infof("Job with unparsable created-by annotation, name %s namespace %s: %v", j.Name, j.Namespace, err)
return types.UID(""), false
}
if sr.Reference.Kind != "CronJob" {
glog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
// Don't believe a job that claims to have a parent in a different namespace.
if sr.Reference.Namespace != j.Namespace {
glog.V(4).Infof("Alleged scheduledJob parent in different namespace (%s) from Job name %s namespace %s", sr.Reference.Namespace, j.Name, j.Namespace)
return types.UID(""), false
}
return sr.Reference.UID, true
}
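// To make the annotation handling above concrete: the created-by annotation
// stores a JSON-serialized v1.SerializedReference, and the parent UID lives at
// reference.uid. A stripped-down sketch of just the decode step (illustrative
// only, not part of the original file; the sample JSON is hypothetical but
// mirrors the shape produced by makeCreatedByRefJson below):
func exampleParentUIDFromAnnotation() (types.UID, bool) {
	raw := `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions"}}`
	var sr v1.SerializedReference
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		return types.UID(""), false
	}
	if sr.Reference.Kind != "CronJob" {
		return types.UID(""), false
	}
	return sr.Reference.UID, true
}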
// groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).
// It has no receiver, to facilitate testing.
func groupJobsByParent(sjs []batch.CronJob, js []batch.Job) map[types.UID][]batch.Job {
jobsBySj := make(map[types.UID][]batch.Job)
for _, job := range js {
parentUID, found := getParentUIDFromJob(job)
if !found {
glog.Errorf("Unable to get uid from job %s in namespace %s", job.Name, job.Namespace)
continue
}
jobsBySj[parentUID] = append(jobsBySj[parentUID], job)
}
return jobsBySj
}
// getNextStartTimeAfter returns the next scheduled start time after "now" for the given schedule, or an error.
func getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) {
// Using robfig/cron for cron schedule parsing and next-runtime
// computation. We are not using the entire library because:
// - we want to detect when we missed a runtime due to being down, and
//   it is unclear how to set the time so that the last known runtime can
//   be detected;
// - its functions would launch a go-routine to start the job and then
//   return, which leaves open how to handle concurrency control and how
//   to detect changed or deleted schedules and update the jobs.
sched, err := cron.Parse(schedule)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("Unparseable schedule: %s : %s", schedule, err)
}
return sched.Next(now), nil
}
// getRecentUnmetScheduleTimes gets a slice of times (from oldest to latest) that have passed when a Job should have started but did not.
//
// If there are too many (>100) unstarted times, just give up and return an empty slice.
// If there were missed times prior to the last known start time, then those are not returned.
func getRecentUnmetScheduleTimes(sj batch.CronJob, now time.Time) ([]time.Time, error) {
starts := []time.Time{}
sched, err := cron.ParseStandard(sj.Spec.Schedule)
if err != nil {
return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
}
var earliestTime time.Time
if sj.Status.LastScheduleTime != nil {
earliestTime = sj.Status.LastScheduleTime.Time
} else {
// If none found, then this is either a recently created scheduledJob,
// or the active/completed info was somehow lost (contract for status
// in kubernetes says it may need to be recreated), or that we have
// started a job, but have not noticed it yet (distributed systems can
// have arbitrary delays). In any case, use the creation time of the
// CronJob as last known start time.
earliestTime = sj.ObjectMeta.CreationTimestamp.Time
}
if earliestTime.After(now) {
return []time.Time{}, nil
}
for t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) {
starts = append(starts, t)
// An object might miss several starts. For example, if
// controller gets wedged on friday at 5:01pm when everyone has
// gone home, and someone comes in on tuesday AM and discovers
// the problem and restarts the controller, then all the hourly
// jobs, more than 80 of them for one hourly scheduledJob, should
// all start running with no further intervention (if the scheduledJob
// allows concurrency and late starts).
//
// However, if there is a bug somewhere, or incorrect clock
// on controller's server or apiservers (for setting creationTimestamp)
// then there could be so many missed start times (it could be off
// by decades or more), that it would eat up all the CPU and memory
// of this controller. In that case, we do not want to try to list
// all the missed start times.
//
// I've somewhat arbitrarily picked 100, as more than 80,
// but less than "lots".
if len(starts) > 100 {
// We can't get the most recent times so just return an empty slice
return []time.Time{}, fmt.Errorf("Too many missed start times to list")
}
}
return starts, nil
}
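// The heart of getRecentUnmetScheduleTimes is the loop above: start from the
// last known schedule time and step through sched.Next until "now" is passed,
// bailing out once too many missed times accumulate. A condensed sketch of
// just that iteration (illustrative only; the hard-coded hourly schedule and
// the cap of 100 are assumptions mirroring the code above):
func exampleMissedTimes(lastStart, now time.Time) ([]time.Time, error) {
	sched, err := cron.ParseStandard("0 * * * ?") // hourly, on the hour
	if err != nil {
		return nil, err
	}
	missed := []time.Time{}
	for t := sched.Next(lastStart); !t.After(now); t = sched.Next(t) {
		missed = append(missed, t)
		if len(missed) > 100 {
			return nil, fmt.Errorf("too many missed start times to list")
		}
	}
	return missed, nil
}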
// XXX unit test this
// getJobFromTemplate makes a Job from a CronJob
func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job, error) {
// TODO: consider adding the following labels:
// nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience
// scheduled-job-name=$SJ_NAME -- for user convenience
labels := copyLabels(&sj.Spec.JobTemplate)
annotations := copyAnnotations(&sj.Spec.JobTemplate)
createdByRefJson, err := makeCreatedByRefJson(sj)
if err != nil {
return nil, err
}
annotations[v1.CreatedByAnnotation] = string(createdByRefJson)
// We want the job name for a given nominal start time to be deterministic, to avoid the same job being created twice
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))
job := &batch.Job{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
Annotations: annotations,
Name: name,
},
}
if err := api.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil {
return nil, fmt.Errorf("unable to convert job template: %v", err)
}
return job, nil
}
// getTimeHash returns the scheduled time as Unix epoch seconds.
func getTimeHash(scheduledTime time.Time) int64 {
return scheduledTime.Unix()
}
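// Putting the two helpers above together: the generated job name is simply the
// CronJob name plus the Unix time of the nominal start, so a retried sync for
// the same start time produces the same name rather than a second job. A tiny
// illustration (not part of the original file; the time value is arbitrary):
func exampleDeterministicJobName() string {
	scheduled := time.Date(2016, time.May, 19, 10, 0, 0, 0, time.UTC)
	return fmt.Sprintf("%s-%d", "mycronjob", getTimeHash(scheduled)) // "mycronjob-1463652000"
}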
// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
createdByRef, err := v1.GetReference(object)
if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err)
}
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning.
codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef,
})
if err != nil {
return "", fmt.Errorf("unable to serialize controller reference: %v", err)
}
return string(createdByRefJson), nil
}
func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
}
}
return false
}

View file

@ -0,0 +1,380 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
//"fmt"
"strings"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/types"
//"k8s.io/kubernetes/pkg/controller"
// "k8s.io/kubernetes/pkg/util/rand"
)
func TestGetJobFromTemplate(t *testing.T) {
// getJobFromTemplate() needs to take the job template and copy the labels and annotations
// and other fields, and add a created-by reference.
var one int64 = 1
var no bool = false
sj := batch.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/extensions/v1beta1/namespaces/snazzycats/jobs/mycronjob",
},
Spec: batch.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: batch.JobSpec{
ActiveDeadlineSeconds: &one,
ManualSelector: &no,
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
},
},
}
var job *batch.Job
job, err := getJobFromTemplate(&sj, time.Time{})
if err != nil {
t.Errorf("Did not expect error: %s", err)
}
if !strings.HasPrefix(job.ObjectMeta.Name, "mycronjob-") {
t.Errorf("Wrong Name")
}
if len(job.ObjectMeta.Labels) != 1 {
t.Errorf("Wrong number of labels")
}
if len(job.ObjectMeta.Annotations) != 2 {
t.Errorf("Wrong number of annotations")
}
v, ok := job.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !ok {
t.Errorf("Missing created-by annotation")
}
expectedCreatedBy := `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"snazzycats","name":"mycronjob","uid":"1a2b3c","apiVersion":"extensions"}}
`
if len(v) != len(expectedCreatedBy) {
t.Errorf("Wrong length for created-by annotation, expected %v got %v", len(expectedCreatedBy), len(v))
}
if v != expectedCreatedBy {
t.Errorf("Wrong value for created-by annotation, expected %v got %v", expectedCreatedBy, v)
}
}
func TestGetParentUIDFromJob(t *testing.T) {
j := &batch.Job{
ObjectMeta: v1.ObjectMeta{
Name: "foobar",
Namespace: v1.NamespaceDefault,
},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
}},
},
}
{
// Case 1: No UID annotation
_, found := getParentUIDFromJob(*j)
if found {
t.Errorf("Unexpectedly found uid")
}
}
{
// Case 2: Has UID annotation
j.ObjectMeta.Annotations = map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions","resourceVersion":"427339"}}`}
expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e")
uid, found := getParentUIDFromJob(*j)
if !found {
t.Errorf("Unexpectedly did not find uid")
} else if uid != expectedUID {
t.Errorf("Wrong UID: %v", uid)
}
}
}
func TestGroupJobsByParent(t *testing.T) {
uid1 := types.UID("11111111-1111-1111-1111-111111111111")
uid2 := types.UID("22222222-2222-2222-2222-222222222222")
uid3 := types.UID("33333333-3333-3333-3333-333333333333")
createdBy1 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"11111111-1111-1111-1111-111111111111","apiVersion":"extensions","resourceVersion":"111111"}}`}
createdBy2 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"22222222-2222-2222-2222-222222222222","apiVersion":"extensions","resourceVersion":"222222"}}`}
createdBy3 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"y","name":"pi","uid":"33333333-3333-3333-3333-333333333333","apiVersion":"extensions","resourceVersion":"333333"}}`}
noCreatedBy := map[string]string{}
{
// Case 1: There are no jobs and scheduledJobs
sjs := []batch.CronJob{}
js := []batch.Job{}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 2: there is one controller with no job.
sjs := []batch.CronJob{
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
js := []batch.Job{}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 3: there is one controller with one job it created.
sjs := []batch.CronJob{
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
js := []batch.Job{
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 1 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 1 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers.
// There are also two jobs with no created-by annotation.
js := []batch.Job{
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
{ObjectMeta: v1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
}
sjs := []batch.CronJob{
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: v1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
{ObjectMeta: v1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 3 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 2 {
t.Errorf("Wrong number of items in map")
}
jobList2, found := jobsBySj[uid2]
if !found {
t.Errorf("Key not found")
}
if len(jobList2) != 1 {
t.Errorf("Wrong number of items in map")
}
jobList3, found := jobsBySj[uid3]
if !found {
t.Errorf("Key not found")
}
if len(jobList3) != 2 {
t.Errorf("Wrong number of items in map")
}
}
}
func TestGetRecentUnmetScheduleTimes(t *testing.T) {
// schedule is hourly on the hour
schedule := "0 * * * ?"
// T1 is a scheduled start time of that schedule
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
// T2 is a scheduled start time of that schedule after T1
T2, err := time.Parse(time.RFC3339, "2016-05-19T11:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
sj := batch.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: "mycronjob",
Namespace: v1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batch.CronJobSpec{
Schedule: schedule,
ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{},
},
}
{
// Case 1: no known start times, and none needed yet.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is after the creation time, but before T1.
now := T1.Add(-7 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected no start times, got: %v", times)
}
}
{
// Case 2: no known start times, and one needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is after T1
now := T1.Add(2 * time.Second)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 1 start time, got: %v", times)
} else if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
}
{
// Case 3: known LastScheduleTime, no start needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1
now := T1.Add(2 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected 0 start times, got: , got: %v", times)
}
}
{
// Case 4: known LastScheduleTime, a start needed
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 2 start times, got: , got: %v", times)
} else if !times[0].Equal(T2) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
}
{
// Case 5: known LastScheduleTime, two starts needed
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 2 {
t.Errorf("expected 2 start times, got: , got: %v", times)
} else {
if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
if !times[1].Equal(T2) {
t.Errorf("expected: %v, got: %v", T2, times[1])
}
}
}
{
// Case 6: now is way way ahead of last start time.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
now := T2.Add(10 * 24 * time.Hour)
_, err := getRecentUnmetScheduleTimes(sj, now)
if err == nil {
t.Errorf("unexpected lack of error")
}
}
}

63
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD generated vendored Normal file
View file

@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"daemoncontroller.go",
"doc.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["daemoncontroller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/wait:go_default_library",
],
)

File diff suppressed because it is too large

File diff suppressed because it is too large

19
vendor/k8s.io/kubernetes/pkg/controller/daemon/doc.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package daemon contains logic for watching and synchronizing
// daemons.
package daemon // import "k8s.io/kubernetes/pkg/controller/daemon"

View file

@ -0,0 +1,77 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"deployment_controller.go",
"progress.go",
"recreate.go",
"rollback.go",
"rolling.go",
"sync.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/retry:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/pod:go_default_library",
"//pkg/util/replicaset:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"deployment_controller_test.go",
"rolling_test.go",
"sync_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
],
)

View file

@ -0,0 +1,455 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package deployment contains all the logic for handling Kubernetes Deployments.
// It implements a set of strategies (rolling, recreate) for deploying an application,
// the means to rollback to previous versions, proportional scaling for mitigating
// risk, cleanup policy, and other useful features of Deployments.
package deployment
import (
"fmt"
"reflect"
"sort"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
)
const (
// FullDeploymentResyncPeriod means we'll attempt to recompute the required replicas
// of all deployments.
// This recomputation happens based on contents in the local caches.
FullDeploymentResyncPeriod = 30 * time.Second
// We must avoid creating new replica set / counting pods until the replica set / pods store has synced.
// If it hasn't synced, to avoid a hot loop, we'll wait this long between checks.
StoreSyncedPollPeriod = 100 * time.Millisecond
// MaxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
MaxRetries = 5
)
// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
type DeploymentController struct {
client clientset.Interface
eventRecorder record.EventRecorder
// To allow injection of syncDeployment for testing.
syncHandler func(dKey string) error
// A store of deployments, populated by the dController
dLister *cache.StoreToDeploymentLister
// A store of ReplicaSets, populated by the rsController
rsLister *cache.StoreToReplicaSetLister
// A store of pods, populated by the podController
podLister *cache.StoreToPodLister
// dListerSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
dListerSynced cache.InformerSynced
// rsListerSynced returns true if the ReplicaSet store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
rsListerSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podListerSynced cache.InformerSynced
// Deployments that need to be synced
queue workqueue.RateLimitingInterface
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})
if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
}
dc := &DeploymentController{
client: client,
eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}
dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeploymentNotification,
UpdateFunc: dc.updateDeploymentNotification,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
DeleteFunc: dc.deleteDeploymentNotification,
})
rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet,
})
dc.syncHandler = dc.syncDeployment
dc.dLister = dInformer.Lister()
dc.rsLister = rsInformer.Lister()
dc.podLister = podInformer.Lister()
dc.dListerSynced = dInformer.Informer().HasSynced
dc.rsListerSynced = rsInformer.Informer().HasSynced
dc.podListerSynced = podInformer.Informer().HasSynced
return dc
}
// Run begins watching and syncing.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
glog.Infof("Starting deployment controller")
if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(dc.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down deployment controller")
}
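// Run blocks until stopCh is closed, so a caller typically starts it on its
// own goroutine and shuts the controller down by closing the channel. A
// minimal sketch of that calling pattern (illustrative only; dc is assumed to
// be an already-constructed *DeploymentController and the worker count is
// arbitrary):
func exampleRunAndStop(dc *DeploymentController) {
	stopCh := make(chan struct{})
	go dc.Run(5, stopCh)
	// ... later, during shutdown, closing the channel stops the workers:
	close(stopCh)
}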
func (dc *DeploymentController) addDeploymentNotification(obj interface{}) {
d := obj.(*extensions.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}
func (dc *DeploymentController) updateDeploymentNotification(old, cur interface{}) {
oldD := old.(*extensions.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
// Resync on deployment object relist.
dc.enqueueDeployment(cur.(*extensions.Deployment))
}
func (dc *DeploymentController) deleteDeploymentNotification(obj interface{}) {
d, ok := obj.(*extensions.Deployment)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
d, ok = tombstone.Obj.(*extensions.Deployment)
if !ok {
glog.Errorf("Tombstone contained object that is not a Deployment %#v", obj)
return
}
}
glog.V(4).Infof("Deleting deployment %s", d.Name)
dc.enqueueDeployment(d)
}
// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(obj interface{}) {
rs := obj.(*extensions.ReplicaSet)
glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
if d := dc.getDeploymentForReplicaSet(rs); d != nil {
dc.enqueueDeployment(d)
}
}
// getDeploymentForReplicaSet returns the deployment managing the given ReplicaSet.
func (dc *DeploymentController) getDeploymentForReplicaSet(rs *extensions.ReplicaSet) *extensions.Deployment {
deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs)
if err != nil || len(deployments) == 0 {
glog.V(4).Infof("Error: %v. No deployment found for ReplicaSet %v, deployment controller will avoid syncing.", err, rs.Name)
return nil
}
// Because all ReplicaSets belonging to a deployment should have a unique label key,
// there should never be more than one deployment returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers; for now we just return the older one
if len(deployments) > 1 {
sort.Sort(util.BySelectorLastUpdateTime(deployments))
glog.Errorf("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s", rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
}
return deployments[0]
}
// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet
// is updated and wakes them up. If anything in the ReplicaSet has changed, we need to
// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet
// types.
func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
curRS := cur.(*extensions.ReplicaSet)
oldRS := old.(*extensions.ReplicaSet)
if curRS.ResourceVersion == oldRS.ResourceVersion {
// Periodic resync will send update events for all known replica sets.
// Two different versions of the same replica set will always have different RVs.
return
}
// TODO: Write a unittest for this case
glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
if d := dc.getDeploymentForReplicaSet(curRS); d != nil {
dc.enqueueDeployment(d)
}
// A number of things could affect the old deployment: labels changing,
// pod template changing, etc.
if !api.Semantic.DeepEqual(oldRS, curRS) {
if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
dc.enqueueDeployment(oldD)
}
}
}
// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or
// a DeletionFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
rs, ok := obj.(*extensions.ReplicaSet)
// When a delete is dropped, the relist will notice a ReplicaSet in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the ReplicaSet
// changed labels the new deployment will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
return
}
rs, ok = tombstone.Obj.(*extensions.ReplicaSet)
if !ok {
glog.Errorf("Tombstone contained object that is not a ReplicaSet %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
return
}
}
glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
if d := dc.getDeploymentForReplicaSet(rs); d != nil {
dc.enqueueDeployment(d)
}
}
func (dc *DeploymentController) enqueueDeployment(deployment *extensions.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", deployment, err)
return
}
dc.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (dc *DeploymentController) worker() {
work := func() bool {
key, quit := dc.queue.Get()
if quit {
return true
}
defer dc.queue.Done(key)
err := dc.syncHandler(key.(string))
dc.handleErr(err, key)
return false
}
for {
if quit := work(); quit {
return
}
}
}
func (dc *DeploymentController) handleErr(err error, key interface{}) {
if err == nil {
dc.queue.Forget(key)
return
}
if dc.queue.NumRequeues(key) < MaxRetries {
glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
dc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
dc.queue.Forget(key)
}
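// Taken together, worker and handleErr implement the standard controller retry
// loop: pop a key, sync it, requeue with rate limiting on failure, and drop the
// key after MaxRetries. A condensed sketch of that flow using the same
// workqueue calls as above (illustrative only; "process" stands in for the
// syncHandler):
func exampleProcessWithRetries(queue workqueue.RateLimitingInterface, process func(key string) error) {
	for {
		key, quit := queue.Get()
		if quit {
			return
		}
		if err := process(key.(string)); err == nil {
			queue.Forget(key)
		} else if queue.NumRequeues(key) < MaxRetries {
			queue.AddRateLimited(key)
		} else {
			// Too many retries: give up on this key.
			queue.Forget(key)
		}
		queue.Done(key)
	}
}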
// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dc.dLister.Indexer.GetByKey(key)
if err != nil {
glog.Errorf("Unable to retrieve deployment %v from store: %v", key, err)
return err
}
if !exists {
glog.Infof("Deployment has been deleted %v", key)
return nil
}
deployment := obj.(*extensions.Deployment)
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
d, err := util.DeploymentDeepCopy(deployment)
if err != nil {
return err
}
everything := metav1.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
}
return nil
}
if d.DeletionTimestamp != nil {
return dc.syncStatusOnly(d)
}
// Handle overlapping deployments by deterministically avoiding syncing deployments that fight over ReplicaSets.
if err = dc.handleOverlap(d); err != nil {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectorOverlap", err.Error())
return nil
}
// Update deployment conditions with an Unknown condition when pausing/resuming
// a deployment. In this way, we can be sure that we won't time out when a user
// resumes a Deployment with a set progressDeadlineSeconds.
if err = dc.checkPausedConditions(d); err != nil {
return err
}
_, err = dc.hasFailed(d)
if err != nil {
return err
}
// TODO: Automatically rollback here if we failed above. Locate the last complete
// revision and populate the rollback spec with it.
// See https://github.com/kubernetes/kubernetes/issues/23211.
if d.Spec.Paused {
return dc.sync(d)
}
if d.Spec.RollbackTo != nil {
revision := d.Spec.RollbackTo.Revision
if d, err = dc.rollback(d, &revision); err != nil {
return err
}
}
scalingEvent, err := dc.isScalingEvent(d)
if err != nil {
return err
}
if scalingEvent {
return dc.sync(d)
}
switch d.Spec.Strategy.Type {
case extensions.RecreateDeploymentStrategyType:
return dc.rolloutRecreate(d)
case extensions.RollingUpdateDeploymentStrategyType:
return dc.rolloutRolling(d)
}
return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
}
// handleOverlap relists all deployments in the same namespace to look for overlaps, and avoids syncing
// the newer overlapping ones (only the oldest one is synced). New/old is determined by when the
// deployment's selector was last updated.
func (dc *DeploymentController) handleOverlap(d *extensions.Deployment) error {
deployments, err := dc.dLister.Deployments(d.Namespace).List(labels.Everything())
if err != nil {
return fmt.Errorf("error listing deployments in namespace %s: %v", d.Namespace, err)
}
overlapping := false
for _, other := range deployments {
foundOverlaps, err := util.OverlapsWith(d, other)
if err != nil {
return err
}
if foundOverlaps {
deploymentCopy, err := util.DeploymentDeepCopy(other)
if err != nil {
return err
}
overlapping = true
// Skip syncing this one if an older overlapping one is found.
if util.SelectorUpdatedBefore(deploymentCopy, d) {
// We don't care whether the overlapping annotation update failed or not (we don't make decisions based on it)
dc.markDeploymentOverlap(d, deploymentCopy.Name)
dc.clearDeploymentOverlap(deploymentCopy)
return fmt.Errorf("found deployment %s/%s has overlapping selector with an older deployment %s/%s, skip syncing it", d.Namespace, d.Name, deploymentCopy.Namespace, deploymentCopy.Name)
}
dc.markDeploymentOverlap(deploymentCopy, d.Name)
d, _ = dc.clearDeploymentOverlap(d)
}
}
if !overlapping {
// We don't care whether the overlapping annotation update failed or not (we don't make decisions based on it)
d, _ = dc.clearDeploymentOverlap(d)
}
return nil
}
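// The decision rule above, condensed into a standalone predicate purely for illustration.
// This is a sketch, not part of the upstream controller: the name shouldSkipSyncFor is
// hypothetical, while util.OverlapsWith and util.SelectorUpdatedBefore are the helpers
// already used in handleOverlap. A deployment d is skipped when some overlapping
// deployment had its selector updated earlier than d's.
func shouldSkipSyncFor(d, other *extensions.Deployment) (bool, error) {
    overlaps, err := util.OverlapsWith(d, other)
    if err != nil || !overlaps {
        return false, err
    }
    // d loses to any overlapping deployment whose selector was updated before d's.
    return util.SelectorUpdatedBefore(other, d), nil
}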
func (dc *DeploymentController) markDeploymentOverlap(deployment *extensions.Deployment, withDeployment string) (*extensions.Deployment, error) {
if deployment.Annotations[util.OverlapAnnotation] == withDeployment && deployment.Status.ObservedGeneration >= deployment.Generation {
return deployment, nil
}
if deployment.Annotations == nil {
deployment.Annotations = make(map[string]string)
}
// Update observedGeneration for overlapping deployments so that their deletion won't be blocked.
deployment.Status.ObservedGeneration = deployment.Generation
deployment.Annotations[util.OverlapAnnotation] = withDeployment
return dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
}
func (dc *DeploymentController) clearDeploymentOverlap(deployment *extensions.Deployment) (*extensions.Deployment, error) {
if len(deployment.Annotations[util.OverlapAnnotation]) == 0 {
return deployment, nil
}
delete(deployment.Annotations, util.OverlapAnnotation)
return dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
}

@ -0,0 +1,286 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
)
var (
alwaysReady = func() bool { return true }
noTimestamp = metav1.Time{}
)
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: name,
CreationTimestamp: timestamp,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{},
},
}
}
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet {
rs := rs(name, specReplicas, selector, noTimestamp)
rs.Status = extensions.ReplicaSetStatus{
Replicas: int32(statusReplicas),
}
return rs
}
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
d := extensions.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
},
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: selector,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
},
RevisionHistoryLimit: revisionHistoryLimit,
},
}
if maxSurge != nil {
d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
}
if maxUnavailable != nil {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
}
return &d
}
func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template,
},
}
}
func getKey(d *extensions.Deployment, t *testing.T) string {
if key, err := controller.KeyFunc(d); err != nil {
t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
return ""
} else {
return key
}
}
type fixture struct {
t *testing.T
client *fake.Clientset
// Objects to put in the store.
dLister []*extensions.Deployment
rsLister []*extensions.ReplicaSet
podLister []*v1.Pod
// Actions expected to happen on the client. Objects from here are also
// preloaded into NewSimpleClientset.
actions []core.Action
objects []runtime.Object
}
func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) {
f.actions = append(f.actions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d))
}
func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
action.Subresource = "status"
f.actions = append(f.actions, action)
}
func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs))
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
return f
}
func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
c.dLister.Indexer.Add(d)
}
for _, rs := range f.rsLister {
c.rsLister.Indexer.Add(rs)
}
for _, pod := range f.podLister {
c.podLister.Indexer.Add(pod)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
err := c.syncDeployment(deploymentName)
if err != nil {
f.t.Errorf("error syncing deployment: %v", err)
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
continue
}
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
f.dLister = append(f.dLister, d)
f.objects = append(f.objects, d)
rs := newReplicaSet(d, "deploymentrs-4186632231", 1)
f.expectCreateRSAction(rs)
f.expectUpdateDeploymentAction(d)
f.expectUpdateDeploymentStatusAction(d)
f.run(getKey(d, t))
}
func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
now := metav1.Now()
d.DeletionTimestamp = &now
f.dLister = append(f.dLister, d)
f.run(getKey(d, t))
}
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
empty := metav1.LabelSelector{}
d.Spec.Selector = &empty
controller.dLister.Indexer.Add(d)
// We expect the deployment controller to not take action here since its configuration
// is invalid, even though no replica sets exist that match its selector.
controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))
filteredActions := filterInformerActions(fake.Actions())
if len(filteredActions) == 0 {
return
}
for _, action := range filteredActions {
t.Logf("unexpected action: %#v", action)
}
t.Errorf("expected deployment controller to not take action")
}
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "pods") ||
action.Matches("list", "deployments") ||
action.Matches("list", "replicasets") ||
action.Matches("watch", "pods") ||
action.Matches("watch", "deployments") ||
action.Matches("watch", "replicasets")) {
continue
}
ret = append(ret, action)
}
return ret
}

@ -0,0 +1,189 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// hasFailed determines whether a deployment has failed by estimating its progress.
// A deployment is considered to be progressing when a new replica set is created or adopted,
// and when new pods scale up or old pods scale down. Progress is not estimated for paused
// deployments or when the user doesn't care about it, i.e. progressDeadlineSeconds is not
// specified.
func (dc *DeploymentController) hasFailed(d *extensions.Deployment) (bool, error) {
if d.Spec.ProgressDeadlineSeconds == nil || d.Spec.RollbackTo != nil || d.Spec.Paused {
return false, nil
}
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false)
if err != nil {
return false, err
}
// There is a template change so we don't need to check for any progress right now.
if newRS == nil {
return false, nil
}
// Look at the status of the deployment - if there is already a NewRSAvailableReason
// then we don't need to estimate any progress. This is needed in order to avoid
// estimating progress for scaling events after a rollout has finished.
cond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
if cond != nil && cond.Reason == util.NewRSAvailableReason {
return false, nil
}
// TODO: Look for permanent failures here.
// See https://github.com/kubernetes/kubernetes/issues/18568
allRSs := append(oldRSs, newRS)
newStatus := dc.calculateStatus(allRSs, newRS, d)
// If the deployment is complete or it is progressing, there is no need to check if it
// has timed out.
if util.DeploymentComplete(d, &newStatus) || util.DeploymentProgressing(d, &newStatus) {
return false, nil
}
// Check if the deployment has timed out.
return util.DeploymentTimedOut(d, &newStatus), nil
}
// syncRolloutStatus updates the status of a deployment during a rollout. There are
// cases where this helper runs that the scaling detection cannot prevent, for
// example a resync of the deployment after it was scaled up. In those cases,
// we shouldn't try to estimate any progress.
func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
newStatus := dc.calculateStatus(allRSs, newRS, d)
// If there is no progressDeadlineSeconds set, remove any Progressing condition.
if d.Spec.ProgressDeadlineSeconds == nil {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
}
// If there is only one active replica set then we are not running a new rollout;
// this is a resync where we don't need to estimate any progress for this deployment.
currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
isResyncEvent := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
// Check for progress only if there is a progress deadline set and the latest rollout
// hasn't completed yet. We also need to ensure the new replica set exists, otherwise
// we cannot estimate any progress.
if d.Spec.ProgressDeadlineSeconds != nil && !isResyncEvent && newRS != nil {
switch {
case util.DeploymentComplete(d, &newStatus):
// Update the deployment conditions with a message for the new replica set that
// was successfully deployed. If the condition already exists, we ignore this update.
msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentProgressing(d, &newStatus):
// If there is any progress made, continue by not checking if the deployment failed. This
// behavior emulates the rolling updater progressDeadline check.
msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that, but
// it also does not update conditions when the reason of the new condition is the
// same as the old one. The Progressing condition is a special case because we want to
// update with the same reason and change just lastUpdateTime iff we notice any
// progress. That's why we handle it here.
if currentCond != nil {
if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
}
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentTimedOut(d, &newStatus):
// Update the deployment with a timeout condition. If the condition already exists,
// we ignore this update.
msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
}
}
// Move failure conditions of all replica sets into the deployment conditions. For now,
// only one failure condition is returned from getReplicaFailures.
if replicaFailureCond := dc.getReplicaFailures(allRSs, newRS); len(replicaFailureCond) > 0 {
// There will be only one ReplicaFailure condition on the replica set.
util.SetDeploymentCondition(&newStatus, replicaFailureCond[0])
} else {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure)
}
// Do not update if there is nothing new to add.
if reflect.DeepEqual(d.Status, newStatus) {
// TODO: If there is no sign of progress at this point then there is a high chance that the
// deployment is stuck. We should resync this deployment at some point[1] in the future[2] and
// check if it has timed out. We definitely need this, otherwise we depend on the controller
// resync interval. See https://github.com/kubernetes/kubernetes/issues/34458.
//
// [1] time.Now() + progressDeadlineSeconds - lastUpdateTime (of the Progressing condition).
// [2] Use dc.queue.AddAfter
return nil
}
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
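// A minimal sketch of the requeue computation that the TODO above hints at; it is not
// implemented by this controller. The idea is to requeue the deployment roughly when its
// progress deadline would expire (e.g. via dc.queue.AddAfter(key, remaining)). The helper
// name progressDeadlineRemaining is hypothetical, and the sketch assumes this file also
// imports "time".
func progressDeadlineRemaining(d *extensions.Deployment, now time.Time) time.Duration {
    cond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
    if cond == nil || d.Spec.ProgressDeadlineSeconds == nil {
        return 0
    }
    // The deadline is measured from the last time the Progressing condition was updated,
    // mirroring "[1] time.Now() + progressDeadlineSeconds - lastUpdateTime" above.
    deadline := cond.LastUpdateTime.Add(time.Duration(*d.Spec.ProgressDeadlineSeconds) * time.Second)
    return deadline.Sub(now)
}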
// getReplicaFailures will convert replica failure conditions from replica sets
// to deployment conditions.
func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition {
var conditions []extensions.DeploymentCondition
if newRS != nil {
for _, c := range newRS.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
// Return failures for the new replica set over failures from old replica sets.
if len(conditions) > 0 {
return conditions
}
for i := range allRSs {
rs := allRSs[i]
if rs == nil {
continue
}
for _, c := range rs.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
return conditions
}

@ -0,0 +1,138 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/wait"
)
// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(deployment *extensions.Deployment) error {
// Don't create a new RS if one doesn't already exist, so that we avoid scaling up before scaling down.
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
activeOldRSs := controller.FilterActiveReplicaSets(oldRSs)
// scale down old replica sets
scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(activeOldRSs, deployment)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// Wait for all old replica sets to scale down to zero.
if err := dc.waitForInactiveReplicaSets(activeOldRSs); err != nil {
return err
}
// If we need to create a new RS, create it now
// TODO: Create a new RS without re-listing all RSs.
if newRS == nil {
newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return err
}
allRSs = append(oldRSs, newRS)
}
// scale up new replica set
scaledUp, err := dc.scaleUpNewReplicaSetForRecreate(newRS, deployment)
if err != nil {
return err
}
if scaledUp {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
dc.cleanupDeployment(oldRSs, deployment)
// Sync deployment status
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled := false
for i := range oldRSs {
rs := oldRSs[i]
// Scaling not required.
if *(rs.Spec.Replicas) == 0 {
continue
}
scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
if err != nil {
return false, err
}
if scaledRS {
oldRSs[i] = updatedRS
scaled = true
}
}
return scaled, nil
}
// waitForInactiveReplicaSets will wait until all passed replica sets are inactive and have been noticed
// by the replica set controller.
func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.ReplicaSet) error {
for i := range oldRSs {
rs := oldRSs[i]
desiredGeneration := rs.Generation
observedGeneration := rs.Status.ObservedGeneration
specReplicas := *(rs.Spec.Replicas)
statusReplicas := rs.Status.Replicas
if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
if err != nil {
return false, err
}
specReplicas = *(replicaSet.Spec.Replicas)
statusReplicas = replicaSet.Status.Replicas
observedGeneration = replicaSet.Status.ObservedGeneration
// TODO: We also need to wait for terminating replicas to actually terminate.
// See https://github.com/kubernetes/kubernetes/issues/32567
return observedGeneration >= desiredGeneration && *(replicaSet.Spec.Replicas) == 0 && replicaSet.Status.Replicas == 0, nil
}); err != nil {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
rs.Name, observedGeneration >= desiredGeneration, specReplicas, statusReplicas)
}
return err
}
}
return nil
}
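// The inactivity predicate polled above, pulled out as a standalone helper purely for
// illustration (rsInactive is a hypothetical name, not upstream code): a replica set
// counts as inactive once the replica set controller has observed the latest generation
// and both desired and actual replicas are zero.
func rsInactive(rs *extensions.ReplicaSet, desiredGeneration int64) bool {
    return rs.Status.ObservedGeneration >= desiredGeneration &&
        *(rs.Spec.Replicas) == 0 &&
        rs.Status.Replicas == 0
}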
// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}

@ -0,0 +1,106 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
// rollback rolls the deployment back to a revision; it is a no-op if toRevision is the deployment's current revision.
func (dc *DeploymentController) rollback(deployment *extensions.Deployment, toRevision *int64) (*extensions.Deployment, error) {
newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return nil, err
}
allRSs := append(allOldRSs, newRS)
// If rollback revision is 0, rollback to the last revision
if *toRevision == 0 {
if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 {
// If we still can't find the last revision, give up on the rollback
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
// Give up the rollback and clear rollbackTo
return dc.updateDeploymentAndClearRollbackTo(deployment)
}
}
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == *toRevision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// Roll back by copying podTemplate.Spec from the replica set.
// The revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call.
// This is a no-op if the spec matches the current deployment's podTemplate.Spec.
deployment, performedRollback, err := dc.rollbackToTemplate(deployment, rs)
if performedRollback && err == nil {
dc.emitRollbackNormalEvent(deployment, fmt.Sprintf("Rolled back deployment %q to revision %d", deployment.Name, *toRevision))
}
return deployment, err
}
}
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to rollback to.")
// Give up the rollback
return dc.updateDeploymentAndClearRollbackTo(deployment)
}
func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deployment, rs *extensions.ReplicaSet) (d *extensions.Deployment, performedRollback bool, err error) {
if !reflect.DeepEqual(deploymentutil.GetNewReplicaSetTemplate(deployment), rs.Spec.Template) {
glog.Infof("Rolling back deployment %s to template spec %+v", deployment.Name, rs.Spec.Template.Spec)
deploymentutil.SetFromReplicaSetTemplate(deployment, rs.Spec.Template)
// Set the RS (the old RS we're rolling back to) annotations back on the deployment;
// otherwise, the deployment's current annotations (which should be the same as the current new RS's) will be copied to the RS after the rollback.
//
// For example,
// A Deployment has old RS1 with annotation {change-cause:create}, and new RS2 {change-cause:edit}.
// Note that both annotations are copied from Deployment, and the Deployment should be annotated {change-cause:edit} as well.
// Now, rollback Deployment to RS1, we should update Deployment's pod-template and also copy annotation from RS1.
// Deployment is now annotated {change-cause:create}, and we have new RS1 {change-cause:create}, old RS2 {change-cause:edit}.
//
// If we don't copy the annotations back from RS to deployment on rollback, the Deployment will stay as {change-cause:edit},
// and new RS1 becomes {change-cause:edit} (copied from deployment after rollback), old RS2 {change-cause:edit}, which is not correct.
deploymentutil.SetDeploymentAnnotationsTo(deployment, rs)
performedRollback = true
} else {
glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %s, skipping rollback...", deployment.Name)
dc.emitRollbackWarningEvent(deployment, deploymentutil.RollbackTemplateUnchanged, fmt.Sprintf("The rollback revision contains the same template as current deployment %q", deployment.Name))
}
d, err = dc.updateDeploymentAndClearRollbackTo(deployment)
return
}
func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) {
dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, reason, message)
}
func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) {
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}
// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment.
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(deployment *extensions.Deployment) (*extensions.Deployment, error) {
glog.V(4).Infof("Cleans up rollbackTo of deployment %s", deployment.Name)
deployment.Spec.RollbackTo = nil
return dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).Update(deployment)
}

@ -0,0 +1,229 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"sort"
"github.com/golang/glog"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/integer"
)
// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, true)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
// Scale up, if we can.
scaledUp, err := dc.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
return err
}
if scaledUp {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
// Scale down, if we can.
scaledDown, err := dc.reconcileOldReplicaSets(allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, deployment)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
dc.cleanupDeployment(oldRSs, deployment)
// Sync deployment status
return dc.syncRolloutStatus(allRSs, newRS, deployment)
}
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required.
return false, nil
}
if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
// Scale down.
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}
newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
if err != nil {
return false, err
}
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, newReplicasCount, deployment)
return scaled, err
}
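// For orientation only: a sketch of the surge-bounded target that the call to
// deploymentutil.NewRSNewReplicas above resolves to for RollingUpdate deployments.
// It is consistent with the TestDeploymentController_reconcileNewReplicaSet scenarios
// in this commit, but it is an assumption about (not a copy of) the deploymentutil
// implementation; the scale-down case is already handled by the caller before this
// point. newRSTargetSketch is a hypothetical name.
func newRSTargetSketch(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) int32 {
    maxTotalPods := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
    currentPodCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
    if currentPodCount >= maxTotalPods {
        // No room left to surge.
        return *(newRS.Spec.Replicas)
    }
    // Scale up by the remaining surge budget, but never beyond the desired replicas.
    scaleUpCount := maxTotalPods - currentPodCount
    scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
    return *(newRS.Spec.Replicas) + scaleUpCount
}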
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
if oldPodsCount == 0 {
// Can't scale down further
return false, nil
}
allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down. We can scale down in the following 2 cases:
// * Some old replica sets have unhealthy replicas: we can safely scale down those unhealthy replicas since that won't further
// increase unavailability.
// * The new replica set has scaled up and its replicas have become ready: then we can scale down old replica sets in a further step.
//
// maxScaledDown := allPodsCount - minAvailable - newReplicaSetPodsUnavailable
// We take into account not only maxUnavailable and any surge pods that have been created, but also unavailable pods from
// the newRS, so that unavailable pods from the newRS do not make us scale down old replica sets in a further
// step (which would increase unavailability).
//
// Concrete example:
//
// * 10 replicas
// * 2 maxUnavailable (absolute number, not percent)
// * 3 maxSurge (absolute number, not percent)
//
// case 1:
// * Deployment is updated, newRS is created with 3 replicas, oldRS is scaled down to 8, and newRS is scaled up to 5.
// * The new replica set pods crashloop and never become available.
// * allPodsCount is 13. minAvailable is 8. newRSPodsUnavailable is 5.
// * A node fails and causes one of the oldRS pods to become unavailable. However, 13 - 8 - 5 = 0, so the oldRS won't be scaled down.
// * The user notices the crashloop and does kubectl rollout undo to rollback.
// * newRSPodsUnavailable is 1, since we rolled back to the good replica set, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping pods will be scaled down.
// * The total number of pods will then be 9 and the newRS can be scaled up to 10.
//
// case 2:
// Same example, but pushing a new pod template instead of rolling back (aka "roll over"):
// * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
// * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
// allow the new replica set to be scaled up by 5.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
if maxScaledDown <= 0 {
return false, nil
}
// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown)
if err != nil {
return false, nil
}
glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
// Scale down old replica sets; we need to check maxUnavailable to ensure we can scale down.
allRSs = append(oldRSs, newRS)
scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
return false, nil
}
glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
totalScaledDown := cleanupCount + scaledDownCount
return totalScaledDown > 0, nil
}
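// The scale-down budget worked through in the long comment above, condensed into a
// standalone helper purely for illustration (maxScaledDownFor is a hypothetical name,
// not upstream code). Plugging in the numbers from "case 1": allPodsCount=13,
// desiredReplicas=10, maxUnavailable=2, newRSUnavailable=5 gives 13 - (10-2) - 5 = 0,
// so no old replicas may be scaled down yet.
func maxScaledDownFor(allPodsCount, desiredReplicas, maxUnavailable, newRSUnavailable int32) int32 {
    minAvailable := desiredReplicas - maxUnavailable
    return allPodsCount - minAvailable - newRSUnavailable
}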
// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
// Safely scale down all old replica sets with unhealthy replicas. The replica set controller will sort the pods in the order
// not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
// be deleted first and won't increase unavailability.
totalScaledDown := int32(0)
for i, targetRS := range oldRSs {
if totalScaledDown >= maxCleanupCount {
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
}
scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return nil, totalScaledDown, err
}
totalScaledDown += scaledDownCount
oldRSs[i] = updatedOldRS
}
return oldRSs, totalScaledDown, nil
}
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when the deployment strategy is "RollingUpdate".
// It needs to check maxUnavailable to ensure availability.
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
// Find the number of available pods.
availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
if availablePodCount <= minAvailable {
// Cannot scale down.
return 0, nil
}
glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
totalScaledDown := int32(0)
totalScaleDownCount := availablePodCount - minAvailable
for _, targetRS := range oldRSs {
if totalScaledDown >= totalScaleDownCount {
// No further scaling required.
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this ReplicaSet.
continue
}
// Scale down.
scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return totalScaledDown, err
}
totalScaledDown += scaleDownCount
}
return totalScaledDown, nil
}

@ -0,0 +1,379 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/util/intstr"
)
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxSurge intstr.IntOrString
oldReplicas int
newReplicas int
scaleExpected bool
expectedNewReplicas int
}{
{
// Should not scale up.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 2,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 5,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 7,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 2,
scaleExpected: false,
},
{
// Should scale down.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 2,
newReplicas: 11,
scaleExpected: true,
expectedNewReplicas: 10,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*extensions.ReplicaSet{newRS, oldRS}
maxUnavailable := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
controller := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled || len(fake.Actions()) > 0 {
t.Errorf("unexpected scaling: %v", fake.Actions())
}
continue
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
if len(fake.Actions()) != 1 {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
oldReplicas int
newReplicas int
readyPodsFromOldRS int
readyPodsFromNewRS int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect unhealthy replicas from old replica sets been cleaned up
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect 1 unhealthy replica from old replica sets been cleaned up, and 1 ready pod been scaled down
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 9,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newSelector := map[string]string{"foo": "new"}
oldSelector := map[string]string{"foo": "old"}
newRS := rs("foo-new", test.newReplicas, newSelector, noTimestamp)
newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*extensions.ReplicaSet{oldRS, newRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected && scaled {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
continue
}
}
func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
tests := []struct {
oldReplicas int
readyPods int
unHealthyPods int
maxCleanupCount int
cleanupCountExpected int
}{
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 1,
cleanupCountExpected: 1,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 3,
cleanupCountExpected: 2,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 0,
cleanupCountExpected: 0,
},
{
oldReplicas: 10,
readyPods: 10,
unHealthyPods: 0,
maxCleanupCount: 3,
cleanupCountExpected: 0,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*extensions.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, int32(test.maxCleanupCount))
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if int(cleanupCount) != test.cleanupCountExpected {
t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
continue
}
}
}
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
readyPods int
oldReplicas int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 8,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 8,
oldReplicas: 10,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 1,
oldReplicas: 10,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*extensions.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled != 0 {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
continue
}
if test.scaleExpected && scaled == 0 {
t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
continue
}
// There are both list and update actions logged, so extract the update
// action for verification.
var updateAction core.UpdateAction
for _, action := range fakeClientset.Actions() {
switch a := action.(type) {
case core.UpdateAction:
if updateAction != nil {
t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a)
} else {
updateAction = a
}
}
}
if updateAction == nil {
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}

File diff suppressed because it is too large

@ -0,0 +1,403 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
"time"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
testclient "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/intstr"
)
func maxSurge(val int) *intstr.IntOrString {
surge := intstr.FromInt(val)
return &surge
}
func TestScale(t *testing.T) {
newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)
var updatedTemplate = func(replicas int) *extensions.Deployment {
d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Template.Labels["another"] = "label"
return d
}
tests := []struct {
name string
deployment *extensions.Deployment
oldDeployment *extensions.Deployment
newRS *extensions.ReplicaSet
oldRSs []*extensions.ReplicaSet
expectedNew *extensions.ReplicaSet
expectedOld []*extensions.ReplicaSet
wasntUpdated map[string]bool
desiredReplicasAnnotations map[string]int32
}{
{
name: "normal scaling event: 10 -> 12",
deployment: newDeployment("foo", 12, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
expectedNew: rs("foo-v1", 12, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
},
{
name: "normal scaling event: 10 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
expectedNew: rs("foo-v1", 5, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
},
{
name: "proportional scaling: 5 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
},
{
name: "proportional scaling: 5 -> 3",
deployment: newDeployment("foo", 3, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
},
{
name: "proportional scaling: 9 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),
newRS: rs("foo-v2", 8, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
},
{
name: "proportional scaling: 7 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 3, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
},
{
name: "proportional scaling: 13 -> 8",
deployment: newDeployment("foo", 8, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales up the new replica set.
{
name: "leftover distribution: 3 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
},
// Scales down the older replica set.
{
name: "leftover distribution: 3 -> 2",
deployment: newDeployment("foo", 2, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up the latest replica set first.
{
name: "proportional scaling (no new rs): 4 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales down to zero
{
name: "proportional scaling: 6 -> 0",
deployment: newDeployment("foo", 0, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 3, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 0, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up from zero
{
name: "proportional scaling: 0 -> 6",
deployment: newDeployment("foo", 6, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 0, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 6, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
},
// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
// update.
{
name: "failed rs update",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},
desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
},
{
name: "deployment with surge pods",
deployment: newDeployment("foo", 20, nil, maxSurge(2), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil),
newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 11, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
},
{
name: "change both surge and size",
deployment: newDeployment("foo", 50, nil, maxSurge(6), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil),
newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 22, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
},
{
name: "change both size and template",
deployment: updatedTemplate(14),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
},
}
for _, test := range tests {
_ = olderTimestamp
t.Log(test.name)
fake := fake.Clientset{}
dc := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
if test.newRS != nil {
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
if rs == nil {
continue
}
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
// Construct the nameToSize map that will hold all the sizes we got out of the tests.
// Skip updating the map if the replica set wasn't updated since there will be
// no update action for it.
nameToSize := make(map[string]int32)
if test.newRS != nil {
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
continue
}
if len(test.expectedOld) != len(test.oldRSs) {
t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
continue
}
for n := range test.oldRSs {
rs := test.oldRSs[n]
expected := test.expectedOld[n]
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
}
}
}
}
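The proportional-scaling cases above share one piece of arithmetic: each replica set is resized in proportion to its share of the deployment's current total, and integer leftovers are handed to the newer replica sets first. The sketch below is a simplified, self-contained illustration of that idea only; proportionalScale is a hypothetical name, and the real controller additionally consults max surge and the desired-replicas annotations, which is why cases such as "13 -> 8" come out slightly differently.

package main

import "fmt"

// proportionalScale is a hypothetical, simplified distribution: sizes holds the
// current replica set sizes ordered newest first, newTotal is the desired sum.
func proportionalScale(sizes []int, newTotal int) []int {
	oldTotal := 0
	for _, s := range sizes {
		oldTotal += s
	}
	out := make([]int, len(sizes))
	if oldTotal == 0 {
		// Scaling up from zero: give everything to the newest replica set,
		// matching the "0 -> 6" expectation above.
		if len(out) > 0 {
			out[0] = newTotal
		}
		return out
	}
	distributed := 0
	for i, s := range sizes {
		out[i] = s * newTotal / oldTotal // floor of the proportional share
		distributed += out[i]
	}
	// Hand the integer leftovers to the newest replica sets first.
	for i := 0; distributed < newTotal && i < len(out); i++ {
		out[i]++
		distributed++
	}
	return out
}

func main() {
	fmt.Println(proportionalScale([]int{1, 1, 1}, 4)) // [2 1 1], the "3 -> 4" case
	fmt.Println(proportionalScale([]int{1, 1, 1}, 2)) // [1 1 0], the "3 -> 2" case
}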
func TestDeploymentController_cleanupDeployment(t *testing.T) {
selector := map[string]string{"foo": "bar"}
tests := []struct {
oldRSs []*extensions.ReplicaSet
revisionHistoryLimit int32
expectedDeletions int
}{
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
newRSWithStatus("foo-3", 0, 0, selector),
},
revisionHistoryLimit: 1,
expectedDeletions: 2,
},
{
// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 1, selector),
newRSWithStatus("foo-3", 1, 0, selector),
newRSWithStatus("foo-4", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 1,
},
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 2,
},
{
oldRSs: []*extensions.ReplicaSet{
newRSWithStatus("foo-1", 1, 1, selector),
newRSWithStatus("foo-2", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
},
}
for i := range tests {
test := tests[i]
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
for _, rs := range test.oldRSs {
controller.rsLister.Indexer.Add(rs)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
controller.cleanupDeployment(test.oldRSs, d)
gotDeletions := 0
for _, action := range fake.Actions() {
if "delete" == action.GetVerb() {
gotDeletions++
}
}
if gotDeletions != test.expectedDeletions {
t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
continue
}
}
}
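TestDeploymentController_cleanupDeployment above encodes two rules: only replica sets whose spec and status replica counts are both zero are candidates for deletion, and at most len(oldRSs) - revisionHistoryLimit of them are removed. A minimal stand-alone sketch of that filtering, with hypothetical names, could look like this:

package main

import "fmt"

type rsInfo struct {
	name           string
	specReplicas   int32
	statusReplicas int32
}

// deletableOldRSs is a hypothetical helper mirroring the behaviour the test
// expects: keep revisionHistoryLimit of the old replica sets, and never touch
// one that still has desired or observed replicas.
func deletableOldRSs(old []rsInfo, revisionHistoryLimit int32) []string {
	diff := int32(len(old)) - revisionHistoryLimit
	if diff <= 0 {
		return nil
	}
	var out []string
	for _, rs := range old {
		if int32(len(out)) >= diff {
			break
		}
		if rs.specReplicas == 0 && rs.statusReplicas == 0 {
			out = append(out, rs.name)
		}
	}
	return out
}

func main() {
	old := []rsInfo{
		{"foo-1", 0, 0},
		{"foo-2", 0, 1},
		{"foo-3", 1, 0},
		{"foo-4", 1, 1},
	}
	// With revisionHistoryLimit = 0 only foo-1 qualifies, matching the
	// second case in the table above (expectedDeletions: 1).
	fmt.Println(deletableOldRSs(old, 0))
}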

View file

@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["deployment_util.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/annotations:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/pod:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["deployment_util_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/intstr:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["disruption.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/policy/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/policy/v1beta1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["disruption_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/policy/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/util/workqueue:go_default_library",
],
)

File diff suppressed because it is too large

File diff suppressed because it is too large

19
vendor/k8s.io/kubernetes/pkg/controller/doc.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller contains code for controllers (like the replication
// controller).
package controller // import "k8s.io/kubernetes/pkg/controller"

60
vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD generated vendored Normal file
View file

@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"endpoints_controller.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/endpoints:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["endpoints_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/endpoints:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/testing:go_default_library",
],
)

View file

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package service provides EndpointController implementation
// to manage and sync service endpoints.
package endpoint // import "k8s.io/kubernetes/pkg/controller/endpoint"

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,72 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"garbagecollector.go",
"metrics.go",
"rate_limiter_helper.go",
"uid_cache.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/meta/metatypes:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/controller/garbagecollector/metaonly:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/golang/groupcache/lru",
"//vendor:github.com/prometheus/client_golang/prometheus",
],
)
go_test(
name = "go_default_test",
srcs = ["garbagecollector_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/install:go_default_library",
"//pkg/api/meta/metatypes:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/controller/garbagecollector/metaonly:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/runtime/serializer:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/json:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,477 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
_ "k8s.io/kubernetes/pkg/api/install"
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/clock"
"k8s.io/kubernetes/pkg/util/json"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/workqueue"
)
func TestNewGarbageCollector(t *testing.T) {
config := &restclient.Config{}
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
podResource := []schema.GroupVersionResource{{Version: "v1", Resource: "pods"}}
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), podResource)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 1, len(gc.monitors))
}
// fakeAction records information about requests to aid in testing.
type fakeAction struct {
method string
path string
query string
}
// String returns method=path to aid in testing
func (f *fakeAction) String() string {
return strings.Join([]string{f.method, f.path}, "=")
}
type FakeResponse struct {
statusCode int
content []byte
}
// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
// statusCode and content returned by this handler for different method + path.
response map[string]FakeResponse
lock sync.Mutex
actions []fakeAction
}
// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
f.lock.Lock()
defer f.lock.Unlock()
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
fakeResponse, ok := f.response[request.Method+request.URL.Path]
if !ok {
fakeResponse.statusCode = 200
fakeResponse.content = []byte("{\"kind\": \"List\"}")
}
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(fakeResponse.statusCode)
response.Write(fakeResponse.content)
}
// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
srv := httptest.NewServer(http.HandlerFunc(handler))
config := &restclient.Config{
Host: srv.URL,
}
return srv, config
}
func setupGC(t *testing.T, config *restclient.Config) *GarbageCollector {
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
podResource := []schema.GroupVersionResource{{Version: "v1", Resource: "pods"}}
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), podResource)
if err != nil {
t.Fatal(err)
}
return gc
}
func getPod(podName string, ownerReferences []v1.OwnerReference) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: "ns1",
OwnerReferences: ownerReferences,
},
}
}
func serilizeOrDie(t *testing.T, object interface{}) []byte {
data, err := json.Marshal(object)
if err != nil {
t.Fatal(err)
}
return data
}
// Verify that processItem performs the expected actions.
func TestProcessItem(t *testing.T) {
pod := getPod("ToBeDeletedPod", []v1.OwnerReference{
{
Kind: "ReplicationController",
Name: "owner1",
UID: "123",
APIVersion: "v1",
},
})
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
200,
serilizeOrDie(t, pod),
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
gc := setupGC(t, clientConfig)
item := &node{
identity: objectReference{
OwnerReference: metatypes.OwnerReference{
Kind: pod.Kind,
APIVersion: pod.APIVersion,
Name: pod.Name,
UID: pod.UID,
},
Namespace: pod.Namespace,
},
// owners are intentionally left empty. The processItem routine should get the latest item from the server.
owners: nil,
}
err := gc.processItem(item)
if err != nil {
t.Errorf("Unexpected Error: %v", err)
}
expectedActionSet := sets.NewString()
expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
actualActionSet := sets.NewString()
for _, action := range testHandler.actions {
actualActionSet.Insert(action.String())
}
if !expectedActionSet.Equal(actualActionSet) {
t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
actualActionSet, expectedActionSet.Difference(actualActionSet))
}
}
// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
for myUID, node := range uidToNode {
for dependentNode := range node.dependents {
found := false
for _, owner := range dependentNode.owners {
if owner.UID == myUID {
found = true
break
}
}
if !found {
t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
}
}
for _, owner := range node.owners {
ownerNode, ok := uidToNode[owner.UID]
if !ok {
// It's possible that the owner node doesn't exist
continue
}
if _, ok := ownerNode.dependents[node]; !ok {
t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
}
}
}
}
func createEvent(eventType eventType, selfUID string, owners []string) event {
var ownerReferences []v1.OwnerReference
for i := 0; i < len(owners); i++ {
ownerReferences = append(ownerReferences, v1.OwnerReference{UID: types.UID(owners[i])})
}
return event{
eventType: eventType,
obj: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: types.UID(selfUID),
OwnerReferences: ownerReferences,
},
},
}
}
func TestProcessEvent(t *testing.T) {
var testScenarios = []struct {
name string
// a series of events that will be supplied to the
// Propagator.eventQueue.
events []event
}{
{
name: "test1",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
},
},
{
name: "test2",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "4", []string{"2"}),
createEvent(deleteEvent, "2", []string{"doesn't matter"}),
},
},
{
name: "test3",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "4", []string{"3"}),
createEvent(updateEvent, "2", []string{"4"}),
},
},
{
name: "reverse test2",
events: []event{
createEvent(addEvent, "4", []string{"2"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "1", []string{}),
createEvent(deleteEvent, "2", []string{"doesn't matter"}),
},
},
}
for _, scenario := range testScenarios {
propagator := &Propagator{
eventQueue: workqueue.NewTimedWorkQueue(),
uidToNode: &concurrentUIDToNode{
RWMutex: &sync.RWMutex{},
uidToNode: make(map[types.UID]*node),
},
gc: &GarbageCollector{
dirtyQueue: workqueue.NewTimedWorkQueue(),
clock: clock.RealClock{},
absentOwnerCache: NewUIDCache(2),
},
}
for i := 0; i < len(scenario.events); i++ {
propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: propagator.gc.clock.Now(), Object: &scenario.events[i]})
propagator.processEvent()
verifyGraphInvariants(scenario.name, propagator.uidToNode.uidToNode, t)
}
}
}
// TestDependentsRace relies on Go's data race detector to check if there is
// a data race on the dependents field.
func TestDependentsRace(t *testing.T) {
gc := setupGC(t, &restclient.Config{})
const updates = 100
owner := &node{dependents: make(map[*node]struct{})}
ownerUID := types.UID("owner")
gc.propagator.uidToNode.Write(owner)
go func() {
for i := 0; i < updates; i++ {
dependent := &node{}
gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
}
}()
go func() {
gc.orphanQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: owner})
for i := 0; i < updates; i++ {
gc.orphanFinalizer()
}
}()
}
// Verify that the list and watch functions correctly convert the ListOptions.
func TestGCListWatcher(t *testing.T) {
testHandler := &fakeActionHandler{}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
clientPool := dynamic.NewClientPool(clientConfig, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
client, err := clientPool.ClientForGroupVersionResource(podResource)
if err != nil {
t.Fatal(err)
}
lw := gcListWatcher(client, podResource)
lw.Watch(v1.ListOptions{ResourceVersion: "1"})
lw.List(v1.ListOptions{ResourceVersion: "1"})
if e, a := 2, len(testHandler.actions); e != a {
t.Errorf("expect %d requests, got %d", e, a)
}
if e, a := "resourceVersion=1", testHandler.actions[0].query; e != a {
t.Errorf("expect %s, got %s", e, a)
}
if e, a := "resourceVersion=1", testHandler.actions[1].query; e != a {
t.Errorf("expect %s, got %s", e, a)
}
}
func podToGCNode(pod *v1.Pod) *node {
return &node{
identity: objectReference{
OwnerReference: metatypes.OwnerReference{
Kind: pod.Kind,
APIVersion: pod.APIVersion,
Name: pod.Name,
UID: pod.UID,
},
Namespace: pod.Namespace,
},
// owners are intentionally left empty. The processItem routine should get the latest item from the server.
owners: nil,
}
}
func TestAbsentUIDCache(t *testing.T) {
rc1Pod1 := getPod("rc1Pod1", []v1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc1",
UID: "1",
APIVersion: "v1",
},
})
rc1Pod2 := getPod("rc1Pod2", []v1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc1",
UID: "1",
APIVersion: "v1",
},
})
rc2Pod1 := getPod("rc2Pod1", []v1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc2",
UID: "2",
APIVersion: "v1",
},
})
rc3Pod1 := getPod("rc3Pod1", []v1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc3",
UID: "3",
APIVersion: "v1",
},
})
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
200,
serilizeOrDie(t, rc1Pod1),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
200,
serilizeOrDie(t, rc1Pod2),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
200,
serilizeOrDie(t, rc2Pod1),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
200,
serilizeOrDie(t, rc3Pod1),
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
404,
[]byte{},
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
gc := setupGC(t, clientConfig)
gc.absentOwnerCache = NewUIDCache(2)
gc.processItem(podToGCNode(rc1Pod1))
gc.processItem(podToGCNode(rc2Pod1))
// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
gc.processItem(podToGCNode(rc1Pod2))
// after this call, rc2 should be evicted from the UIDCache
gc.processItem(podToGCNode(rc3Pod1))
// check cache
if !gc.absentOwnerCache.Has(types.UID("1")) {
t.Errorf("expected rc1 to be in the cache")
}
if gc.absentOwnerCache.Has(types.UID("2")) {
t.Errorf("expected rc2 to not exist in the cache")
}
if !gc.absentOwnerCache.Has(types.UID("3")) {
t.Errorf("expected rc3 to be in the cache")
}
// check the request sent to the server
count := 0
for _, action := range testHandler.actions {
if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
count++
}
}
if count != 1 {
t.Errorf("expected only 1 GET rc1 request, got %d", count)
}
}
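verifyGraphInvariants above asserts one structural property of the propagator's graph: every owner/dependent edge is recorded on both endpoints. The toy sketch below (the types are illustrative, not the vendored node/Propagator) shows how adding an edge keeps that invariant by updating both sides at once.

package main

import "fmt"

type toyNode struct {
	uid        string
	owners     []string // UIDs of owners
	dependents map[*toyNode]struct{}
}

// addOwnership records the edge on both sides, which is exactly the property
// verifyGraphInvariants asserts for the real graph.
func addOwnership(owner, dependent *toyNode) {
	if owner.dependents == nil {
		owner.dependents = map[*toyNode]struct{}{}
	}
	owner.dependents[dependent] = struct{}{}
	dependent.owners = append(dependent.owners, owner.uid)
}

func main() {
	rc := &toyNode{uid: "1"}
	pod := &toyNode{uid: "2"}
	addOwnership(rc, pod)
	_, ok := rc.dependents[pod]
	fmt.Println(ok, pod.owners) // true [1]
}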

View file

@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"metaonly.go",
"types.generated.go",
"types.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/runtime/serializer:go_default_library",
"//pkg/types:go_default_library",
"//vendor:github.com/ugorji/go/codec",
],
)
go_test(
name = "go_default_test",
srcs = ["metaonly_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/install:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/runtime/serializer:go_default_library",
],
)

View file

@ -0,0 +1,65 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metaonly
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/api"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/runtime/serializer"
)
func (obj *MetadataOnlyObject) GetObjectKind() schema.ObjectKind { return obj }
func (obj *MetadataOnlyObjectList) GetObjectKind() schema.ObjectKind { return obj }
type metaOnlyJSONScheme struct{}
// This function can be extended to map different gvks to different MetadataOnlyObject types,
// each embedding a different version of ObjectMeta. Currently the system
// only supports v1.ObjectMeta.
func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object {
if strings.HasSuffix(gvk.Kind, "List") {
return &MetadataOnlyObjectList{}
} else {
return &MetadataOnlyObject{}
}
}
func NewMetadataCodecFactory() serializer.CodecFactory {
// populating another scheme from api.Scheme, registering every kind with
// MetadataOnlyObject (or MetadataOnlyObjectList).
scheme := runtime.NewScheme()
allTypes := api.Scheme.AllKnownTypes()
for kind := range allTypes {
if kind.Version == runtime.APIVersionInternal {
continue
}
metaOnlyObject := gvkToMetadataOnlyObject(kind)
scheme.AddKnownTypeWithName(kind, metaOnlyObject)
}
scheme.AddUnversionedTypes(api.Unversioned, &metav1.Status{})
return serializer.NewCodecFactory(scheme)
}
// String converts a MetadataOnlyObject to a human-readable string.
func (metaOnly MetadataOnlyObject) String() string {
return fmt.Sprintf("%s/%s, name: %s, DeletionTimestamp:%v", metaOnly.TypeMeta.APIVersion, metaOnly.TypeMeta.Kind, metaOnly.ObjectMeta.Name, metaOnly.ObjectMeta.DeletionTimestamp)
}

View file

@ -0,0 +1,164 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metaonly
import (
"encoding/json"
"reflect"
"testing"
_ "k8s.io/kubernetes/pkg/api/install"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/runtime/serializer"
)
func getPod() *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: "pod",
OwnerReferences: []v1.OwnerReference{
{UID: "1234"},
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
}
}
func getPodJson(t *testing.T) []byte {
data, err := json.Marshal(getPod())
if err != nil {
t.Fatal(err)
}
return data
}
func getPodListJson(t *testing.T) []byte {
data, err := json.Marshal(&v1.PodList{
TypeMeta: metav1.TypeMeta{
Kind: "PodList",
APIVersion: "v1",
},
Items: []v1.Pod{
*getPod(),
*getPod(),
},
})
if err != nil {
t.Fatal(err)
}
return data
}
func verfiyMetadata(description string, t *testing.T, in *MetadataOnlyObject) {
pod := getPod()
if e, a := pod.ObjectMeta, in.ObjectMeta; !reflect.DeepEqual(e, a) {
t.Errorf("%s: expected %#v, got %#v", description, e, a)
}
}
func TestDecodeToMetadataOnlyObject(t *testing.T) {
data := getPodJson(t)
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
t.Fatalf("expected to get a JSON serializer")
}
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
// decode with into
into := &MetadataOnlyObject{}
ret, _, err := codec.Decode(data, nil, into)
if err != nil {
t.Fatal(err)
}
metaOnly, ok := ret.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
}
verfiyMetadata("check returned metaonly with into", t, metaOnly)
verfiyMetadata("check into", t, into)
// decode without into
ret, _, err = codec.Decode(data, nil, nil)
if err != nil {
t.Fatal(err)
}
metaOnly, ok = ret.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
}
verfiyMetadata("check returned metaonly without into", t, metaOnly)
}
func verifyListMetadata(t *testing.T, metaOnlyList *MetadataOnlyObjectList) {
items, err := meta.ExtractList(metaOnlyList)
if err != nil {
t.Fatal(err)
}
for _, item := range items {
metaOnly, ok := item.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected item to be *MetadataOnlyObject")
}
verfiyMetadata("check list", t, metaOnly)
}
}
func TestDecodeToMetadataOnlyObjectList(t *testing.T) {
data := getPodListJson(t)
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
t.Fatalf("expected to get a JSON serializer")
}
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
// decode with into
into := &MetadataOnlyObjectList{}
ret, _, err := codec.Decode(data, nil, into)
if err != nil {
t.Fatal(err)
}
metaOnlyList, ok := ret.(*MetadataOnlyObjectList)
if !ok {
t.Fatalf("expected ret to be *runtime.UnstructuredList")
}
verifyListMetadata(t, metaOnlyList)
verifyListMetadata(t, into)
// decode without into
ret, _, err = codec.Decode(data, nil, nil)
if err != nil {
t.Fatal(err)
}
metaOnlyList, ok = ret.(*MetadataOnlyObjectList)
if !ok {
t.Fatalf("expected ret to be *runtime.UnstructuredList")
}
verifyListMetadata(t, metaOnlyList)
}

File diff suppressed because it is too large

View file

@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metaonly
import (
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
// MetadataOnlyObject allows decoding only the apiVersion, kind, and metadata fields of
// JSON data.
// TODO: enable meta-only decoding for protobuf.
type MetadataOnlyObject struct {
metav1.TypeMeta `json:",inline"`
// +optional
v1.ObjectMeta `json:"metadata,omitempty"`
}
// MetadataOnlyObjectList allows decoding from JSON data only the typemeta and metadata of
// a list, and those of the enclosed objects.
// TODO: enable meta-only decoding for protobuf.
type MetadataOnlyObjectList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []MetadataOnlyObject `json:"items"`
}
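The practical effect of MetadataOnlyObject is that decoding an object's JSON keeps only apiVersion, kind and metadata and drops everything else (spec, status, and so on). A rough stand-alone illustration with plain encoding/json and a simplified local struct, not the vendored types, is:

package main

import (
	"encoding/json"
	"fmt"
)

// metaOnly is a simplified stand-in for MetadataOnlyObject.
type metaOnly struct {
	APIVersion string `json:"apiVersion"`
	Kind       string `json:"kind"`
	Metadata   struct {
		Name string `json:"name"`
		UID  string `json:"uid"`
	} `json:"metadata"`
}

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"Pod",` +
		`"metadata":{"name":"pod","uid":"1234"},` +
		`"spec":{"containers":[{"name":"c","image":"img"}]}}`)
	var m metaOnly
	if err := json.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	// Only the type and object metadata survive; spec is ignored.
	fmt.Println(m.Kind, m.Metadata.Name, m.Metadata.UID)
}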

View file

@ -0,0 +1,73 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"sync"
"time"
"k8s.io/kubernetes/pkg/util/clock"
"github.com/prometheus/client_golang/prometheus"
)
const (
GarbageCollectSubsystem = "garbage_collector"
EventProcessingLatencyKey = "event_processing_latency_microseconds"
DirtyProcessingLatencyKey = "dirty_processing_latency_microseconds"
OrphanProcessingLatencyKey = "orphan_processing_latency_microseconds"
)
var (
EventProcessingLatency = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: GarbageCollectSubsystem,
Name: EventProcessingLatencyKey,
Help: "Time in microseconds of an event spend in the eventQueue",
},
)
DirtyProcessingLatency = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: GarbageCollectSubsystem,
Name: DirtyProcessingLatencyKey,
Help: "Time in microseconds of an item spend in the dirtyQueue",
},
)
OrphanProcessingLatency = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: GarbageCollectSubsystem,
Name: OrphanProcessingLatencyKey,
Help: "Time in microseconds of an item spend in the orphanQueue",
},
)
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
// Register the metrics.
registerMetrics.Do(func() {
prometheus.MustRegister(EventProcessingLatency)
prometheus.MustRegister(DirtyProcessingLatency)
prometheus.MustRegister(OrphanProcessingLatency)
})
}
func sinceInMicroseconds(clock clock.Clock, start time.Time) float64 {
return float64(clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
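These summaries only produce data once a caller observes values on them, typically the result of sinceInMicroseconds for an item pulled off a queue. The self-contained sketch below shows that pattern with a hypothetical summary of its own (it is not one of the vendored metrics) and the real clock instead of the injectable clock.Clock:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var itemLatency = prometheus.NewSummary(prometheus.SummaryOpts{
	Subsystem: "garbage_collector",
	Name:      "example_processing_latency_microseconds",
	Help:      "Illustrative summary, not one of the vendored metrics.",
})

func main() {
	prometheus.MustRegister(itemLatency)
	start := time.Now()
	// ... work happens here ...
	// Same microsecond conversion as sinceInMicroseconds above, using the
	// standard library clock directly.
	itemLatency.Observe(float64(time.Since(start).Nanoseconds() / int64(time.Microsecond)))
}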

View file

@ -0,0 +1,60 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"strings"
"sync"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/metrics"
)
// RegisteredRateLimiter records the registered RateLimiters to avoid
// duplication.
type RegisteredRateLimiter struct {
rateLimiters map[schema.GroupVersion]*sync.Once
}
// NewRegisteredRateLimiter returns a new RegisteredRateLimiter.
// TODO: NewRegisteredRateLimiter is not dynamic. We need to find a better way
// when the GC dynamically changes the resources it monitors.
func NewRegisteredRateLimiter(resources []schema.GroupVersionResource) *RegisteredRateLimiter {
rateLimiters := make(map[schema.GroupVersion]*sync.Once)
for _, resource := range resources {
gv := resource.GroupVersion()
if _, found := rateLimiters[gv]; !found {
rateLimiters[gv] = &sync.Once{}
}
}
return &RegisteredRateLimiter{rateLimiters: rateLimiters}
}
func (r *RegisteredRateLimiter) registerIfNotPresent(gv schema.GroupVersion, client *dynamic.Client, prefix string) {
once, found := r.rateLimiters[gv]
if !found {
return
}
once.Do(func() {
if rateLimiter := client.GetRateLimiter(); rateLimiter != nil {
group := strings.Replace(gv.Group, ".", ":", -1)
metrics.RegisterMetricAndTrackRateLimiterUsage(fmt.Sprintf("%s_%s_%s", prefix, group, gv.Version), rateLimiter)
}
})
}
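Stripped of the dynamic client and metrics dependencies, RegisteredRateLimiter is essentially a map of sync.Once guards keyed by group/version, so each limiter is registered at most once and unknown keys are ignored. A self-contained sketch of just that once-per-key pattern (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type oncePerKey struct {
	guards map[string]*sync.Once
}

func newOncePerKey(keys []string) *oncePerKey {
	g := make(map[string]*sync.Once, len(keys))
	for _, k := range keys {
		g[k] = &sync.Once{}
	}
	return &oncePerKey{guards: g}
}

// do runs fn at most once per known key; unknown keys are ignored, mirroring
// registerIfNotPresent returning early when the GroupVersion is not tracked.
func (o *oncePerKey) do(key string, fn func()) {
	once, ok := o.guards[key]
	if !ok {
		return
	}
	once.Do(fn)
}

func main() {
	reg := newOncePerKey([]string{"v1"})
	for i := 0; i < 3; i++ {
		reg.do("v1", func() { fmt.Println("registered v1") }) // prints once
	}
	reg.do("batch/v1", func() { fmt.Println("never printed") })
}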

View file

@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"sync"
"github.com/golang/groupcache/lru"
"k8s.io/kubernetes/pkg/types"
)
// UIDCache is an LRU cache for UIDs.
type UIDCache struct {
mutex sync.Mutex
cache *lru.Cache
}
// NewUIDCache returns a UIDCache.
func NewUIDCache(maxCacheEntries int) *UIDCache {
return &UIDCache{
cache: lru.New(maxCacheEntries),
}
}
// Add adds a uid to the cache.
func (c *UIDCache) Add(uid types.UID) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache.Add(uid, nil)
}
// Has returns whether a UID is in the cache.
func (c *UIDCache) Has(uid types.UID) bool {
c.mutex.Lock()
defer c.mutex.Unlock()
_, found := c.cache.Get(uid)
return found
}
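A quick usage sketch of the cache above, mirroring how TestAbsentUIDCache earlier in this diff exercises it: with two entries, touching a UID via Has promotes it, and the next Add evicts the least recently used one. The import paths are taken from this vendor tree and assumed to resolve.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller/garbagecollector"
	"k8s.io/kubernetes/pkg/types"
)

func main() {
	c := garbagecollector.NewUIDCache(2)
	c.Add(types.UID("1"))
	c.Add(types.UID("2"))
	c.Has(types.UID("1"))              // touch "1" so it becomes most recently used
	c.Add(types.UID("3"))              // evicts "2"
	fmt.Println(c.Has(types.UID("2"))) // false
}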

View file

@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"batch.go",
"core.go",
"extensions.go",
"factory.go",
"generic.go",
"rbac.go",
"storage.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/listers/batch/v1:go_default_library",
"//pkg/client/listers/core/internalversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/watch:go_default_library",
],
)

View file

@ -0,0 +1,83 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"reflect"
"time"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
// JobInformer is a type of SharedIndexInformer which watches and lists all jobs.
// The interface provides a constructor for the informer and a lister for jobs.
type JobInformer interface {
Informer() cache.SharedIndexInformer
Lister() batchv1listers.JobLister
}
type jobInformer struct {
*sharedInformerFactory
}
// Informer checks whether a jobInformer already exists in the sharedInformerFactory and, if not, creates a new
// informer of type jobInformer and connects it to the sharedInformerFactory.
func (f *jobInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&batch.Job{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = NewJobInformer(f.client, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// NewJobInformer returns a SharedIndexInformer that lists and watches all jobs
func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Batch().Jobs(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Batch().Jobs(v1.NamespaceAll).Watch(options)
},
},
&batch.Job{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
// Lister returns lister for jobInformer
func (f *jobInformer) Lister() batchv1listers.JobLister {
informer := f.Informer()
return batchv1listers.NewJobLister(informer.GetIndexer())
}
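NewJobInformer can also be used on its own, outside the shared factory. The fragment below is a sketch under the assumption that a clientset has already been constructed elsewhere; startJobInformer and the package name are illustrative.

package example

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// startJobInformer runs a standalone job informer until stopCh is closed.
func startJobInformer(client clientset.Interface, stopCh <-chan struct{}) {
	informer := informers.NewJobInformer(client, 30*time.Second)
	go informer.Run(stopCh)
}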

File diff suppressed because it is too large

View file

@ -0,0 +1,157 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"reflect"
"time"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
// DaemonSetInformer is a type of SharedIndexInformer which watches and lists all daemon sets.
// The interface provides a constructor for the informer and a lister for daemon sets.
type DaemonSetInformer interface {
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToDaemonSetLister
}
type daemonSetInformer struct {
*sharedInformerFactory
}
func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&extensions.DaemonSet{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().DaemonSets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().DaemonSets(v1.NamespaceAll).Watch(options)
},
},
&extensions.DaemonSet{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *daemonSetInformer) Lister() *cache.StoreToDaemonSetLister {
informer := f.Informer()
return &cache.StoreToDaemonSetLister{Store: informer.GetIndexer()}
}
// DeploymentInformer is a type of SharedIndexInformer which watches and lists all deployments.
type DeploymentInformer interface {
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToDeploymentLister
}
type deploymentInformer struct {
*sharedInformerFactory
}
func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&extensions.Deployment{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().Deployments(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().Deployments(v1.NamespaceAll).Watch(options)
},
},
&extensions.Deployment{},
// TODO remove this. It is hardcoded so that "Waiting for the second deployment to clear overlapping annotation" in
// "overlapping deployment should not fight with each other" will work since it requires a full resync to work properly.
30*time.Second,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *deploymentInformer) Lister() *cache.StoreToDeploymentLister {
informer := f.Informer()
return &cache.StoreToDeploymentLister{Indexer: informer.GetIndexer()}
}
// ReplicaSetInformer is a type of SharedIndexInformer which watches and lists all replicasets.
type ReplicaSetInformer interface {
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToReplicaSetLister
}
type replicaSetInformer struct {
*sharedInformerFactory
}
func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&extensions.ReplicaSet{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
},
},
&extensions.ReplicaSet{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *replicaSetInformer) Lister() *cache.StoreToReplicaSetLister {
informer := f.Informer()
return &cache.StoreToReplicaSetLister{Indexer: informer.GetIndexer()}
}

View file

@ -0,0 +1,183 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"reflect"
"sync"
"time"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/runtime/schema"
)
// SharedInformerFactory provides an interface which holds unique informers for pods, nodes, namespaces, persistent volume
// claims, persistent volumes, and the other resources listed below.
type SharedInformerFactory interface {
// Start starts informers that can start AFTER the API server and controllers have started
Start(stopCh <-chan struct{})
ForResource(schema.GroupResource) (GenericInformer, error)
// when you update these, update generic.go/ForResource, same package
Pods() PodInformer
LimitRanges() LimitRangeInformer
InternalLimitRanges() InternalLimitRangeInformer
Namespaces() NamespaceInformer
InternalNamespaces() InternalNamespaceInformer
Nodes() NodeInformer
PersistentVolumeClaims() PVCInformer
PersistentVolumes() PVInformer
ServiceAccounts() ServiceAccountInformer
DaemonSets() DaemonSetInformer
Deployments() DeploymentInformer
ReplicaSets() ReplicaSetInformer
ClusterRoleBindings() ClusterRoleBindingInformer
ClusterRoles() ClusterRoleInformer
RoleBindings() RoleBindingInformer
Roles() RoleInformer
StorageClasses() StorageClassInformer
Jobs() JobInformer
}
type sharedInformerFactory struct {
client clientset.Interface
// for admission plugins etc.
internalclient internalclientset.Interface
lock sync.Mutex
defaultResync time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client clientset.Interface, internalclient internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory {
return &sharedInformerFactory{
client: client,
internalclient: internalclient,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
}
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// Pods returns a SharedIndexInformer that lists and watches all pods
func (f *sharedInformerFactory) Pods() PodInformer {
return &podInformer{sharedInformerFactory: f}
}
// Nodes returns a SharedIndexInformer that lists and watches all nodes
func (f *sharedInformerFactory) Nodes() NodeInformer {
return &nodeInformer{sharedInformerFactory: f}
}
// Namespaces returns a SharedIndexInformer that lists and watches all namespaces
func (f *sharedInformerFactory) Namespaces() NamespaceInformer {
return &namespaceInformer{sharedInformerFactory: f}
}
// InternalNamespaces returns a SharedIndexInformer that lists and watches all namespaces
func (f *sharedInformerFactory) InternalNamespaces() InternalNamespaceInformer {
return &internalNamespaceInformer{sharedInformerFactory: f}
}
// PersistentVolumeClaims returns a SharedIndexInformer that lists and watches all persistent volume claims
func (f *sharedInformerFactory) PersistentVolumeClaims() PVCInformer {
return &pvcInformer{sharedInformerFactory: f}
}
// PersistentVolumes returns a SharedIndexInformer that lists and watches all persistent volumes
func (f *sharedInformerFactory) PersistentVolumes() PVInformer {
return &pvInformer{sharedInformerFactory: f}
}
// ServiceAccounts returns a SharedIndexInformer that lists and watches all service accounts.
func (f *sharedInformerFactory) ServiceAccounts() ServiceAccountInformer {
return &serviceAccountInformer{sharedInformerFactory: f}
}
// DaemonSets returns a SharedIndexInformer that lists and watches all daemon sets.
func (f *sharedInformerFactory) DaemonSets() DaemonSetInformer {
return &daemonSetInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) Deployments() DeploymentInformer {
return &deploymentInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) ReplicaSets() ReplicaSetInformer {
return &replicaSetInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) ClusterRoles() ClusterRoleInformer {
return &clusterRoleInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) ClusterRoleBindings() ClusterRoleBindingInformer {
return &clusterRoleBindingInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) Roles() RoleInformer {
return &roleInformer{sharedInformerFactory: f}
}
func (f *sharedInformerFactory) RoleBindings() RoleBindingInformer {
return &roleBindingInformer{sharedInformerFactory: f}
}
// LimitRanges returns a SharedIndexInformer that lists and watches all limit ranges.
func (f *sharedInformerFactory) LimitRanges() LimitRangeInformer {
return &limitRangeInformer{sharedInformerFactory: f}
}
// InternalLimitRanges returns a SharedIndexInformer that lists and watches all limit ranges.
func (f *sharedInformerFactory) InternalLimitRanges() InternalLimitRangeInformer {
return &internalLimitRangeInformer{sharedInformerFactory: f}
}
// StorageClasses returns a SharedIndexInformer that lists and watches all storage classes
func (f *sharedInformerFactory) StorageClasses() StorageClassInformer {
return &storageClassInformer{sharedInformerFactory: f}
}
// Jobs returns a SharedIndexInformer that lists and watches all jobs
func (f *sharedInformerFactory) Jobs() JobInformer {
return &jobInformer{sharedInformerFactory: f}
}
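For orientation, a minimal consumer of this factory looks roughly like the sketch below. It is illustrative only and not part of the vendored tree: the internal clientset import path, the apiserver address, and the resync period are assumptions.

// Illustrative sketch: constructing the shared informer factory and reading
// pods from its cache once the initial LIST has completed.
package main

import (
	"fmt"
	"time"

	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" // assumed path
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller/informers"
)

func main() {
	config := &restclient.Config{Host: "http://127.0.0.1:8080"} // assumed local apiserver
	client, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	internal, err := internalclientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)

	// Informers are created lazily, so request them before calling Start.
	factory := informers.NewSharedInformerFactory(client, internal, 30*time.Second)
	podInformer := factory.Pods().Informer()
	factory.Start(stopCh)

	// Wait for the initial LIST to populate the cache before reading it.
	for !podInformer.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Printf("cached pods: %d\n", len(podInformer.GetStore().List()))
}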

View file

@ -0,0 +1,90 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/batch"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
rbacinternal "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime/schema"
)
// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
// shared informers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupResource) (GenericInformer, error) {
switch resource {
case api.Resource("pods"):
return &genericInformer{resource: resource, informer: f.Pods().Informer()}, nil
case api.Resource("limitranges"):
return &genericInformer{resource: resource, informer: f.LimitRanges().Informer()}, nil
case api.Resource("namespaces"):
return &genericInformer{resource: resource, informer: f.Namespaces().Informer()}, nil
case api.Resource("nodes"):
return &genericInformer{resource: resource, informer: f.Nodes().Informer()}, nil
case api.Resource("persistentvolumeclaims"):
return &genericInformer{resource: resource, informer: f.PersistentVolumeClaims().Informer()}, nil
case api.Resource("persistentvolumes"):
return &genericInformer{resource: resource, informer: f.PersistentVolumes().Informer()}, nil
case api.Resource("serviceaccounts"):
return &genericInformer{resource: resource, informer: f.ServiceAccounts().Informer()}, nil
case extensionsinternal.Resource("daemonsets"):
return &genericInformer{resource: resource, informer: f.DaemonSets().Informer()}, nil
case extensionsinternal.Resource("deployments"):
return &genericInformer{resource: resource, informer: f.Deployments().Informer()}, nil
case extensionsinternal.Resource("replicasets"):
return &genericInformer{resource: resource, informer: f.ReplicaSets().Informer()}, nil
case rbacinternal.Resource("clusterrolebindings"):
return &genericInformer{resource: resource, informer: f.ClusterRoleBindings().Informer()}, nil
case rbacinternal.Resource("clusterroles"):
return &genericInformer{resource: resource, informer: f.ClusterRoles().Informer()}, nil
case rbacinternal.Resource("rolebindings"):
return &genericInformer{resource: resource, informer: f.RoleBindings().Informer()}, nil
case rbacinternal.Resource("roles"):
return &genericInformer{resource: resource, informer: f.Roles().Informer()}, nil
case batch.Resource("jobs"):
return &genericInformer{resource: resource, informer: f.Jobs().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
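As a hedged usage sketch (imports omitted; `factory` as in the earlier sketch, and assuming ForResource is exposed on the factory interface in this revision, otherwise a type assertion to the concrete factory would be needed):

// Illustrative sketch: resolving a shared informer by GroupResource.
generic, err := factory.ForResource(api.Resource("pods"))
if err != nil {
	panic(err) // "no informer found for ..." for unsupported resources
}
// The generic informer shares the same underlying cache as factory.Pods().
fmt.Printf("pods in cache: %d\n", len(generic.Informer().GetStore().List()))
_ = generic.Lister() // cache.GenericLister over the same indexer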

View file

@ -0,0 +1,196 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"reflect"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
type ClusterRoleInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.ClusterRoleLister
}
type clusterRoleInformer struct {
*sharedInformerFactory
}
func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&rbac.ClusterRole{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.internalclient.Rbac().ClusterRoles().List(convertListOptionsOrDie(options))
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.internalclient.Rbac().ClusterRoles().Watch(convertListOptionsOrDie(options))
},
},
&rbac.ClusterRole{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *clusterRoleInformer) Lister() cache.ClusterRoleLister {
return cache.NewClusterRoleLister(f.Informer().GetIndexer())
}
type ClusterRoleBindingInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.ClusterRoleBindingLister
}
type clusterRoleBindingInformer struct {
*sharedInformerFactory
}
func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&rbac.ClusterRoleBinding{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.internalclient.Rbac().ClusterRoleBindings().List(convertListOptionsOrDie(options))
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.internalclient.Rbac().ClusterRoleBindings().Watch(convertListOptionsOrDie(options))
},
},
&rbac.ClusterRoleBinding{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *clusterRoleBindingInformer) Lister() cache.ClusterRoleBindingLister {
return cache.NewClusterRoleBindingLister(f.Informer().GetIndexer())
}
type RoleInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.RoleLister
}
type roleInformer struct {
*sharedInformerFactory
}
func (f *roleInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&rbac.Role{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.internalclient.Rbac().Roles(v1.NamespaceAll).List(convertListOptionsOrDie(options))
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.internalclient.Rbac().Roles(v1.NamespaceAll).Watch(convertListOptionsOrDie(options))
},
},
&rbac.Role{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *roleInformer) Lister() cache.RoleLister {
return cache.NewRoleLister(f.Informer().GetIndexer())
}
type RoleBindingInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.RoleBindingLister
}
type roleBindingInformer struct {
*sharedInformerFactory
}
func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&rbac.RoleBinding{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).List(convertListOptionsOrDie(options))
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).Watch(convertListOptionsOrDie(options))
},
},
&rbac.RoleBinding{},
f.defaultResync,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
f.informers[informerType] = informer
return informer
}
func (f *roleBindingInformer) Lister() cache.RoleBindingLister {
return cache.NewRoleBindingLister(f.Informer().GetIndexer())
}
func convertListOptionsOrDie(in v1.ListOptions) api.ListOptions {
out := api.ListOptions{}
if err := api.Scheme.Convert(&in, &out, nil); err != nil {
panic(err)
}
return out
}
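The RBAC informers above are backed by the internal clientset, which is why convertListOptionsOrDie translates the versioned list options; the objects in these caches are therefore internal rbac types. A minimal read from the shared cache might look like the following sketch (imports omitted, `factory` as before):

// Illustrative sketch: objects in these caches are internal rbac types.
crInformer := factory.ClusterRoles().Informer()
for _, obj := range crInformer.GetStore().List() {
	role := obj.(*rbac.ClusterRole) // k8s.io/kubernetes/pkg/apis/rbac
	fmt.Println("cluster role:", role.Name)
}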

View file

@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informers
import (
"reflect"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
// StorageClassInformer is a type of SharedIndexInformer that lists and watches all storage classes.
// The interface provides constructors for the informer and lister for storage classes.
type StorageClassInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.StorageClassLister
}
type storageClassInformer struct {
*sharedInformerFactory
}
func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&storage.StorageClass{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Storage().StorageClasses().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Storage().StorageClasses().Watch(options)
},
},
&storage.StorageClass{},
f.defaultResync,
cache.Indexers{},
)
f.informers[informerType] = informer
return informer
}
func (f *storageClassInformer) Lister() cache.StorageClassLister {
informer := f.Informer()
return cache.NewStorageClassLister(informer.GetIndexer())
}
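Rather than polling the store, consumers typically register event handlers on the shared informer; handlers from every consumer are multiplexed over a single list/watch. A hedged sketch (imports omitted, `factory` as before):

// Illustrative sketch: reacting to StorageClass changes through the shared informer.
scInformer := factory.StorageClasses().Informer()
scInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) {
		sc := obj.(*storage.StorageClass)
		glog.Infof("observed StorageClass %s", sc.Name)
	},
	DeleteFunc: func(obj interface{}) {
		// obj may be a cache.DeletedFinalStateUnknown tombstone for missed deletes.
		glog.Infof("StorageClass removed: %v", obj)
	},
})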

66
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD generated vendored Normal file
View file

@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"jobcontroller.go",
"utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/listers/batch/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"jobcontroller_test.go",
"utils_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/util/rand:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",
],
)

18
vendor/k8s.io/kubernetes/pkg/controller/job/doc.go generated vendored Normal file
View file

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package job contains logic for watching and synchronizing jobs.
package job // import "k8s.io/kubernetes/pkg/controller/job"

File diff suppressed because it is too large

File diff suppressed because it is too large

31
vendor/k8s.io/kubernetes/pkg/controller/job/utils.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
)
func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
}
}
return false
}

View file

@ -0,0 +1,49 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"testing"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
)
func TestIsJobFinished(t *testing.T) {
job := &batch.Job{
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
}},
},
}
if !IsJobFinished(job) {
t.Error("Job was expected to be finished")
}
job.Status.Conditions[0].Status = v1.ConditionFalse
if IsJobFinished(job) {
t.Error("Job was not expected to be finished")
}
job.Status.Conditions[0].Status = v1.ConditionUnknown
if IsJobFinished(job) {
t.Error("Job was not expected to be finished")
}
}

View file

@ -0,0 +1,92 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"hash/adler32"
"sync"
"github.com/golang/groupcache/lru"
"k8s.io/kubernetes/pkg/api/meta"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
type objectWithMeta interface {
meta.Object
}
// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
// Since objects are matched by namespace and labels/selector, two objects with the same namespace
// and labels will have the same key.
func keyFunc(obj objectWithMeta) uint64 {
hash := adler32.New()
hashutil.DeepHashObject(hash, &equivalenceLabelObj{
namespace: obj.GetNamespace(),
labels: obj.GetLabels(),
})
return uint64(hash.Sum32())
}
type equivalenceLabelObj struct {
namespace string
labels map[string]string
}
// MatchingCache saves the label and selector matching relationship
type MatchingCache struct {
mutex sync.RWMutex
cache *lru.Cache
}
// NewMatchingCache returns a new MatchingCache, which saves the label and selector matching relationship.
func NewMatchingCache(maxCacheEntries int) *MatchingCache {
return &MatchingCache{
cache: lru.New(maxCacheEntries),
}
}
// Add will add matching information to the cache.
func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
key := keyFunc(labelObj)
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache.Add(key, selectorObj)
}
// GetMatchingObject looks up the matching object for a given object.
// Note: the cached information may be invalid since the controller may have been deleted or updated;
// callers need to re-check against the live data to ensure the cache entry is not dirty.
func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
key := keyFunc(labelObj)
// NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state
// (it needs to update the least-recently-used bookkeeping), so it cannot be called concurrently.
c.mutex.Lock()
defer c.mutex.Unlock()
return c.cache.Get(key)
}
// Update updates the cached matching information.
func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
c.Add(labelObj, selectorObj)
}
// InvalidateAll invalidates the whole cache.
func (c *MatchingCache) InvalidateAll() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache = lru.New(c.cache.MaxEntries)
}
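MatchingCache keys entries by an adler32 hash of an object's namespace and labels, so a controller can memoize which owning object (for example a ReplicaSet) last matched a given pod-like object and re-validate the hit before trusting it. A minimal sketch, assuming `pod` (*v1.Pod) and `rs` (an extensions/v1beta1 ReplicaSet) are in scope and satisfy meta.Object (imports omitted):

// Illustrative sketch: memoizing a pod -> controller match.
matchCache := controller.NewMatchingCache(4096)

// After resolving which ReplicaSet selects the pod the slow way, remember it.
matchCache.Add(pod, rs)

// Later lookups are cheap, but the entry may be stale: re-check the selector.
if cached, ok := matchCache.GetMatchingObject(pod); ok {
	if cachedRS, ok := cached.(*v1beta1.ReplicaSet); ok {
		_ = cachedRS // verify cachedRS still exists and still selects pod before using it
	}
}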

View file

@ -0,0 +1,61 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"namespace_controller.go",
"namespace_controller_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["namespace_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/util/sets:go_default_library",
],
)

View file

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package namespace contains a controller that handles the namespace lifecycle
package namespace // import "k8s.io/kubernetes/pkg/controller/namespace"

View file

@ -0,0 +1,219 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
const (
// namespaceDeletionGracePeriod is the time period to wait before processing a received namespace event.
// This allows time for the following to occur:
// * lifecycle admission plugins on HA apiservers to also observe a namespace
// deletion and prevent new objects from being created in the terminating namespace
// * non-leader etcd servers to observe last-minute object creations in a namespace
// so this controller's cleanup can actually clean up all objects
namespaceDeletionGracePeriod = 5 * time.Second
)
// NamespaceController is responsible for performing actions dependent upon a namespace phase
type NamespaceController struct {
// client that purges namespace content, must have list/delete privileges on all content
kubeClient clientset.Interface
// clientPool manages a pool of dynamic clients
clientPool dynamic.ClientPool
// store that holds the namespaces
store cache.Store
// controller that observes the namespaces
controller *cache.Controller
// namespaces that have been queued up for processing by workers
queue workqueue.RateLimitingInterface
// function that lists the preferred group versions and their corresponding resource set for namespace deletion
groupVersionResourcesFn func() ([]schema.GroupVersionResource, error)
// opCache is a cache to remember if a particular operation is not supported to aid dynamic client.
opCache *operationNotSupportedCache
// finalizerToken is the finalizer token managed by this controller
finalizerToken v1.FinalizerName
}
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
groupVersionResourcesFn func() ([]schema.GroupVersionResource, error),
resyncPeriod time.Duration,
finalizerToken v1.FinalizerName) *NamespaceController {
// the namespace deletion code looks at the discovery document to enumerate the set of resources on the server.
// it then finds all namespaced resources, and in response to namespace deletion, will call delete on all of them.
// unfortunately, the discovery information does not include the list of supported verbs/methods. if the namespace
// controller calls LIST/DELETECOLLECTION for a resource, it will get a 405 error from the server and cache that that was the case.
// we found in practice though that some auth engines when encountering paths they don't know about may return a 50x.
// until we have verbs, we pre-populate resources that do not support list or delete for well-known apis rather than
// probing the server once in order to be told no.
opCache := &operationNotSupportedCache{
m: make(map[operationKey]bool),
}
ignoredGroupVersionResources := []schema.GroupVersionResource{
{Group: "", Version: "v1", Resource: "bindings"},
}
for _, ignoredGroupVersionResource := range ignoredGroupVersionResources {
opCache.setNotSupported(operationKey{op: operationDeleteCollection, gvr: ignoredGroupVersionResource})
opCache.setNotSupported(operationKey{op: operationList, gvr: ignoredGroupVersionResource})
}
// create the controller so we can inject the enqueue function
namespaceController := &NamespaceController{
kubeClient: kubeClient,
clientPool: clientPool,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
groupVersionResourcesFn: groupVersionResourcesFn,
opCache: opCache,
finalizerToken: finalizerToken,
}
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
// configure the backing store/controller
store, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return kubeClient.Core().Namespaces().Watch(options)
},
},
&v1.Namespace{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
},
)
namespaceController.store = store
namespaceController.controller = controller
return namespaceController
}
// enqueueNamespace adds an object to the controller work queue
// obj could be a *v1.Namespace, or a DeletionFinalStateUnknown item.
func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
// delay processing namespace events to allow HA api servers to observe namespace deletion,
// and HA etcd servers to observe last minute object creations inside the namespace
nm.queue.AddAfter(key, namespaceDeletionGracePeriod)
}
// worker processes the queue of namespace objects.
// Each namespace can be in the queue at most once.
// The system ensures that no two workers can process
// the same namespace at the same time.
func (nm *NamespaceController) worker() {
workFunc := func() bool {
key, quit := nm.queue.Get()
if quit {
return true
}
defer nm.queue.Done(key)
err := nm.syncNamespaceFromKey(key.(string))
if err == nil {
// no error, forget this entry and return
nm.queue.Forget(key)
return false
}
if estimate, ok := err.(*contentRemainingError); ok {
t := estimate.Estimate/2 + 1
glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
nm.queue.AddAfter(key, time.Duration(t)*time.Second)
} else {
// rather than wait for a full resync, re-add the namespace to the queue to be processed
nm.queue.AddRateLimited(key)
utilruntime.HandleError(err)
}
return false
}
for {
quit := workFunc()
if quit {
return
}
}
}
// syncNamespaceFromKey looks for a namespace with the specified key in its store and synchronizes it
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
obj, exists, err := nm.store.GetByKey(key)
if !exists {
glog.Infof("Namespace has been deleted %v", key)
return nil
}
if err != nil {
glog.Errorf("Unable to retrieve namespace %v from store: %v", key, err)
nm.queue.Add(key)
return err
}
namespace := obj.(*v1.Namespace)
return syncNamespace(nm.kubeClient, nm.clientPool, nm.opCache, nm.groupVersionResourcesFn, namespace, nm.finalizerToken)
}
// Run starts observing the system with the specified number of workers.
func (nm *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
go nm.controller.Run(stopCh)
for i := 0; i < workers; i++ {
go wait.Until(nm.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down NamespaceController")
nm.queue.ShutDown()
}
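Wiring the controller together requires a versioned clientset, a dynamic client pool, and a function that reports which namespaced resources to purge. The sketch below mirrors how a controller-manager might start it, but it is illustrative only: `config` and `client` come from the earlier factory sketch, the imports are omitted, and the hard-coded resource list stands in for real API discovery.

// Illustrative sketch: starting the namespace controller with a fixed resource list.
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
gvrFn := func() ([]schema.GroupVersionResource, error) {
	// Real deployments derive this list from discovery; a fixed list keeps the sketch small.
	return []schema.GroupVersionResource{
		{Group: "", Version: "v1", Resource: "pods"},
		{Group: "", Version: "v1", Resource: "services"},
	}, nil
}
nc := namespace.NewNamespaceController(client, clientPool, gvrFn, 5*time.Minute, v1.FinalizerKubernetes)

// Run blocks until the stop channel closes; a controller-manager runs this in its own goroutine.
nc.Run(2, wait.NeverStop)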

View file

@ -0,0 +1,305 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"fmt"
"net/http"
"net/http/httptest"
"path"
"strings"
"sync"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestFinalized(t *testing.T) {
testNamespace := &v1.Namespace{
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"a", "b"},
},
}
if finalized(testNamespace) {
t.Errorf("Unexpected result, namespace is not finalized")
}
testNamespace.Spec.Finalizers = []v1.FinalizerName{}
if !finalized(testNamespace) {
t.Errorf("Expected object to be finalized")
}
}
func TestFinalizeNamespaceFunc(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes", "other"},
},
}
finalizeNamespace(mockClient, testNamespace, v1.FinalizerKubernetes)
actions := mockClient.Actions()
if len(actions) != 1 {
t.Errorf("Expected 1 mock client action, but got %v", len(actions))
}
if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" {
t.Errorf("Expected finalize-namespace action %v", actions[0])
}
finalizers := actions[0].(core.CreateAction).GetObject().(*v1.Namespace).Spec.Finalizers
if len(finalizers) != 1 {
t.Errorf("There should be a single finalizer remaining")
}
if "other" != string(finalizers[0]) {
t.Errorf("Unexpected finalizer value, %v", finalizers[0])
}
}
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersions) {
now := metav1.Now()
namespaceName := "test"
testNamespacePendingFinalize := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
testNamespaceFinalizeComplete := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: v1.NamespaceSpec{},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
// when deleting all content, we do a GET of a collection and a DELETE of a collection by default
dynamicClientActionSet := sets.NewString()
groupVersionResources := testGroupVersionResources()
for _, groupVersionResource := range groupVersionResources {
urlPath := path.Join([]string{
dynamic.LegacyAPIPathResolverFunc(schema.GroupVersionKind{Group: groupVersionResource.Group, Version: groupVersionResource.Version}),
groupVersionResource.Group,
groupVersionResource.Version,
"namespaces",
namespaceName,
groupVersionResource.Resource,
}...)
dynamicClientActionSet.Insert((&fakeAction{method: "GET", path: urlPath}).String())
dynamicClientActionSet.Insert((&fakeAction{method: "DELETE", path: urlPath}).String())
}
scenarios := map[string]struct {
testNamespace *v1.Namespace
kubeClientActionSet sets.String
dynamicClientActionSet sets.String
gvrError error
}{
"pending-finalize": {
testNamespace: testNamespacePendingFinalize,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
strings.Join([]string{"list", "pods", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: dynamicClientActionSet,
},
"complete-finalize": {
testNamespace: testNamespaceFinalizeComplete,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: sets.NewString(),
},
"groupVersionResourceErr": {
testNamespace: testNamespaceFinalizeComplete,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: sets.NewString(),
gvrError: fmt.Errorf("test error"),
},
}
for scenario, testInput := range scenarios {
testHandler := &fakeActionHandler{statusCode: 200}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
mockClient := fake.NewSimpleClientset(testInput.testNamespace)
clientPool := dynamic.NewClientPool(clientConfig, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
fn := func() ([]schema.GroupVersionResource, error) {
return groupVersionResources, nil
}
err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("scenario %s - Unexpected error when syncing namespace %v", scenario, err)
}
// validate traffic from kube client
actionSet := sets.NewString()
for _, action := range mockClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.Equal(testInput.kubeClientActionSet) {
t.Errorf("scenario %s - mock client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
testInput.kubeClientActionSet, actionSet, testInput.kubeClientActionSet.Difference(actionSet))
}
// validate traffic from dynamic client
actionSet = sets.NewString()
for _, action := range testHandler.actions {
actionSet.Insert(action.String())
}
if !actionSet.Equal(testInput.dynamicClientActionSet) {
t.Errorf("scenario %s - dynamic client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
testInput.dynamicClientActionSet, actionSet, testInput.dynamicClientActionSet.Difference(actionSet))
}
}
}
func TestRetryOnConflictError(t *testing.T) {
mockClient := &fake.Clientset{}
numTries := 0
retryOnce := func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
numTries++
if numTries <= 1 {
return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!"))
}
return namespace, nil
}
namespace := &v1.Namespace{}
_, err := retryOnConflictError(mockClient, namespace, retryOnce)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
if numTries != 2 {
t.Errorf("Expected %v, but got %v", 2, numTries)
}
}
func TestSyncNamespaceThatIsTerminatingNonExperimental(t *testing.T) {
testSyncNamespaceThatIsTerminating(t, &metav1.APIVersions{})
}
func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {
testSyncNamespaceThatIsTerminating(t, &metav1.APIVersions{Versions: []string{"extensions/v1beta1"}})
}
func TestSyncNamespaceThatIsActive(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceActive,
},
}
fn := func() ([]schema.GroupVersionResource, error) {
return testGroupVersionResources(), nil
}
err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("Unexpected error when syncing namespace %v", err)
}
if len(mockClient.Actions()) != 0 {
t.Errorf("Expected no action from controller, but got: %v", mockClient.Actions())
}
}
// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
srv := httptest.NewServer(http.HandlerFunc(handler))
config := &restclient.Config{
Host: srv.URL,
}
return srv, config
}
// fakeAction records information about requests to aid in testing.
type fakeAction struct {
method string
path string
}
// String returns method=path to aid in testing
func (f *fakeAction) String() string {
return strings.Join([]string{f.method, f.path}, "=")
}
// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
// statusCode returned by this handler
statusCode int
lock sync.Mutex
actions []fakeAction
}
// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
f.lock.Lock()
defer f.lock.Unlock()
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path})
response.Header().Set("Content-Type", runtime.ContentTypeJSON)
response.WriteHeader(f.statusCode)
response.Write([]byte("{\"kind\": \"List\",\"items\":null}"))
}
// testGroupVersionResources returns a mocked up set of resources across different api groups for testing namespace controller.
func testGroupVersionResources() []schema.GroupVersionResource {
results := []schema.GroupVersionResource{}
results = append(results, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"})
results = append(results, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"})
results = append(results, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"})
return results
}

File diff suppressed because it is too large

92
vendor/k8s.io/kubernetes/pkg/controller/node/BUILD generated vendored Normal file
View file

@ -0,0 +1,92 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"cidr_allocator.go",
"cidr_set.go",
"controller_utils.go",
"doc.go",
"metrics.go",
"nodecontroller.go",
"rate_limited_queue.go",
"test_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/prometheus/client_golang/prometheus",
],
)
go_test(
name = "go_default_test",
srcs = [
"cidr_allocator_test.go",
"cidr_set_test.go",
"nodecontroller_test.go",
"rate_limited_queue_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/diff:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/golang/glog",
],
)

View file

@ -0,0 +1,266 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"errors"
"fmt"
"net"
"sync"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
)
// TODO: figure out good settings for these constants.
const (
// controls how many NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
// podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update.
podCIDRUpdateRetry = 5
)
var errCIDRRangeNoCIDRsRemaining = errors.New("CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range")
type nodeAndCIDR struct {
cidr *net.IPNet
nodeName string
}
// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
AllocateOrOccupyCIDR(node *v1.Node) error
ReleaseCIDR(node *v1.Node) error
}
type rangeAllocator struct {
client clientset.Interface
cidrs *cidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// Channel used to pass Nodes with newly assigned CIDRs to the background updater.
// This increases the throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
sync.Mutex
nodesInProcessing sets.String
}
// NewCIDRRangeAllocator returns a CIDRAllocator that allocates CIDRs for nodes.
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
ra := &rangeAllocator{
client: client,
cidrs: newCIDRSet(clusterCIDR, subNetMaskSize),
clusterCIDR: clusterCIDR,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}
if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
return nil, err
}
}
}
for i := 0; i < cidrUpdateWorkers; i++ {
go func(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-ra.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("NodeCIDRUpdateChannel read returned false.")
return
}
ra.updateCIDRAllocation(workItem)
case <-stopChan:
return
}
}
}(wait.NeverStop)
}
return ra, nil
}
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
r.Lock()
defer r.Unlock()
if r.nodesInProcessing.Has(nodeName) {
return false
}
r.nodesInProcessing.Insert(nodeName)
return true
}
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.Lock()
defer r.Unlock()
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
}
return nil
}
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid CIDR
// if it doesn't currently have one, or marks the CIDR as used if the node already has one.
// WARNING: If you add any return paths or defer any more work from this function,
// you have to handle nodesInProcessing correctly.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)
}
podCIDR, err := r.cidrs.allocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
recordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
glog.V(10).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
}
return nil
}
// ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}
// Marks all CIDRs with subNetMaskSize that belong to serviceCIDR as used,
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster CIDR. It is the case if either
// clusterCIDR contains serviceCIDR with clusterCIDR's Mask applied (this means that clusterCIDR contains serviceCIDR)
// or vice versa (which means that serviceCIDR contains clusterCIDR).
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
if err := r.cidrs.occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = r.client.Core().Nodes().Get(data.nodeName)
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has allocated CIDR %v. Releasing assigned one if different.", node.Name, node.Spec.PodCIDR)
if node.Spec.PodCIDR != data.cidr.String() {
if err := r.cidrs.release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", data.cidr.String())
}
}
return nil
}
node.Spec.PodCIDR = data.cidr.String()
if _, err := r.client.Core().Nodes().Update(node); err != nil {
glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err)
} else {
break
}
}
if err != nil {
recordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// A NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err)
if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr)
}
}
}
return err
}
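Putting the range allocator together: the node controller constructs it with the cluster and service CIDRs plus a per-node mask size, then feeds node add/update events into AllocateOrOccupyCIDR and deletions into ReleaseCIDR. The sketch below is illustrative only; the CIDR values are assumptions, `client` comes from the earlier factory sketch, and imports are omitted.

// Illustrative sketch: allocating pod CIDRs for nodes out of an assumed /16 cluster range.
_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
_, serviceCIDR, _ := net.ParseCIDR("10.96.0.0/12")

// Existing nodes are listed at startup so their CIDRs are marked occupied (nil is test-only);
// a mask size of 24 hands each node a /24 out of the /16.
nodeList, err := client.Core().Nodes().List(v1.ListOptions{})
if err != nil {
	panic(err)
}
allocator, err := node.NewCIDRRangeAllocator(client, clusterCIDR, serviceCIDR, 24, nodeList)
if err != nil {
	panic(err)
}

// Typically wired to node informer events:
//   AddFunc/UpdateFunc -> allocator.AllocateOrOccupyCIDR(node)
//   DeleteFunc         -> allocator.ReleaseCIDR(node)
_ = allocator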

View file

@ -0,0 +1,396 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"net"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
nodePollInterval = 100 * time.Millisecond
)
func waitForUpdatedNodeWithTimeout(nodeHandler *FakeNodeHandler, number int, timeout time.Duration) error {
return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
if len(nodeHandler.getUpdatedNodesCopy()) >= number {
return true, nil
}
return false, nil
})
}
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDR: "127.123.234.0/30",
},
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
// it should return first /30 CIDR after service range
expectedAllocatedCIDR: "127.123.234.64/30",
},
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
expectedAllocatedCIDR: "127.123.234.76/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
if err = rangeAllocator.cidrs.occupy(cidr); err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDR {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDR, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
err = rangeAllocator.cidrs.occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err == nil {
t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates, so just sleep for some time
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
}
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
if updatedNode.Spec.PodCIDR != "" {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
}
}
if len(seenCIDRs) != 0 {
t.Errorf("%v: Seen assigned CIDRs when not expected: %v",
tc.description, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestReleaseCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}{
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "",
cidrsToRelease: []string{"127.123.234.4/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.4/30",
},
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDRFirstRound: "127.123.234.0/30",
cidrsToRelease: []string{"127.123.234.0/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
err = rangeAllocator.cidrs.occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0])
if tc.expectedAllocatedCIDRFirstRound != "" {
if err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
} else {
if err == nil {
t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates here
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
}
}
for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
}
nodeToRelease.Spec.PodCIDR = cidrToRelease
err = allocator.ReleaseCIDR(&nodeToRelease)
if err != nil {
t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
}
}
if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDRSecondRound {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDRSecondRound, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
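
The "Correctly filter out ServiceCIDR" case earlier in this file expects 127.123.234.64/30 because the /26 service range spans 127.123.234.0 through 127.123.234.63, so the first /30 outside it begins at .64. A minimal standalone sketch of that arithmetic (plain stdlib, independent of the allocator code):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	// The service range occupies 127.123.234.0 .. 127.123.234.63 inside the /24 cluster CIDR.
	_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
	last := binary.BigEndian.Uint32(serviceCIDR.IP.To4()) | ^binary.BigEndian.Uint32(serviceCIDR.Mask)
	first := make(net.IP, 4)
	binary.BigEndian.PutUint32(first, last+1)
	fmt.Printf("first /30 after the service range: %v/30\n", first) // 127.123.234.64/30
}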

View file

@@ -0,0 +1,150 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"encoding/binary"
"fmt"
"math/big"
"net"
"sync"
)
type cidrSet struct {
sync.Mutex
clusterCIDR *net.IPNet
clusterIP net.IP
clusterMaskSize int
maxCIDRs int
nextCandidate int
used big.Int
subNetMaskSize int
}
func newCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) *cidrSet {
clusterMask := clusterCIDR.Mask
clusterMaskSize, _ := clusterMask.Size()
maxCIDRs := 1 << uint32(subNetMaskSize-clusterMaskSize)
return &cidrSet{
clusterCIDR: clusterCIDR,
clusterIP: clusterCIDR.IP.To4(),
clusterMaskSize: clusterMaskSize,
maxCIDRs: maxCIDRs,
subNetMaskSize: subNetMaskSize,
}
}
func (s *cidrSet) allocateNext() (*net.IPNet, error) {
s.Lock()
defer s.Unlock()
nextUnused := -1
for i := 0; i < s.maxCIDRs; i++ {
candidate := (i + s.nextCandidate) % s.maxCIDRs
if s.used.Bit(candidate) == 0 {
nextUnused = candidate
break
}
}
if nextUnused == -1 {
return nil, errCIDRRangeNoCIDRsRemaining
}
s.nextCandidate = (nextUnused + 1) % s.maxCIDRs
s.used.SetBit(&s.used, nextUnused, 1)
j := uint32(nextUnused) << uint32(32-s.subNetMaskSize)
ipInt := (binary.BigEndian.Uint32(s.clusterIP)) | j
ip := make([]byte, 4)
binary.BigEndian.PutUint32(ip, ipInt)
return &net.IPNet{
IP: ip,
Mask: net.CIDRMask(s.subNetMaskSize, 32),
}, nil
}
func (s *cidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err error) {
begin, end = 0, s.maxCIDRs
cidrMask := cidr.Mask
maskSize, _ := cidrMask.Size()
if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {
return -1, -1, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.clusterCIDR)
}
if s.clusterMaskSize < maskSize {
subNetMask := net.CIDRMask(s.subNetMaskSize, 32)
begin, err = s.getIndexForCIDR(&net.IPNet{
IP: cidr.IP.To4().Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
ip := make([]byte, 4)
ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))
binary.BigEndian.PutUint32(ip, ipInt)
end, err = s.getIndexForCIDR(&net.IPNet{
IP: net.IP(ip).To4().Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
}
return begin, end, nil
}
func (s *cidrSet) release(cidr *net.IPNet) error {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 0)
}
return nil
}
func (s *cidrSet) occupy(cidr *net.IPNet) (err error) {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 1)
}
return nil
}
func (s *cidrSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {
cidrIndex := (binary.BigEndian.Uint32(s.clusterIP) ^ binary.BigEndian.Uint32(cidr.IP.To4())) >> uint32(32-s.subNetMaskSize)
if cidrIndex >= uint32(s.maxCIDRs) {
return 0, fmt.Errorf("CIDR: %v is out of the range of CIDR allocator", cidr)
}
return int(cidrIndex), nil
}
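
For orientation, here is a standalone sketch of the index arithmetic used by allocateNext and getIndexForCIDR above, with assumed example values (a 10.10.0.0/16 cluster CIDR and /24 subnets, not taken from this change): subnet index 5 maps to 10.10.5.0/24 and back.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.10.0.0/16")
	clusterIP := binary.BigEndian.Uint32(clusterCIDR.IP.To4())
	subNetMaskSize := 24

	// index -> CIDR, mirroring allocateNext: shift the index into the host bits of the cluster prefix.
	index := 5
	ip := make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, clusterIP|uint32(index)<<uint32(32-subNetMaskSize))
	subnet := &net.IPNet{IP: ip, Mask: net.CIDRMask(subNetMaskSize, 32)}
	fmt.Println(subnet) // 10.10.5.0/24

	// CIDR -> index, mirroring getIndexForCIDR: XOR out the cluster prefix and shift back down.
	back := (clusterIP ^ binary.BigEndian.Uint32(subnet.IP.To4())) >> uint32(32-subNetMaskSize)
	fmt.Println(back) // 5
}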

View file

@@ -0,0 +1,336 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"math/big"
"net"
"reflect"
"testing"
"github.com/golang/glog"
)
func TestCIDRSetFullyAllocated(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/30")
a := newCIDRSet(clusterCIDR, 30)
p, err := a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p.String() != "127.123.234.0/30" {
t.Fatalf("unexpected allocated cidr: %s", p.String())
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
a.release(p)
p, err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p.String() != "127.123.234.0/30" {
t.Fatalf("unexpected allocated cidr: %s", p.String())
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
}
func TestCIDRSet_RandomishAllocation(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16")
a := newCIDRSet(clusterCIDR, 24)
// allocate all the CIDRs
var err error
cidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
cidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// release them all
for i := 0; i < 256; i++ {
a.release(cidrs[i])
}
// allocate the CIDRs again
rcidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
rcidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %d, %v", i, err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection")
}
}
func TestCIDRSet_AllocationOccupied(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16")
a := newCIDRSet(clusterCIDR, 24)
// allocate all the CIDRs
var err error
cidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
cidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// release them all
for i := 0; i < 256; i++ {
a.release(cidrs[i])
}
// occupy the last 128 CIDRs
for i := 128; i < 256; i++ {
a.occupy(cidrs[i])
}
// allocate the first 128 CIDRs again
rcidrs := make([]*net.IPNet, 128)
for i := 0; i < 128; i++ {
rcidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %d, %v", i, err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// check that occupy() works properly
for i := 128; i < 256; i++ {
rcidrs = append(rcidrs, cidrs[i])
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection")
}
}
func TestGetBitforCIDR(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedBit int
expectErr bool
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedBit: 0,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.123.0.0/16",
expectedBit: 123,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.168.0.0/16",
expectedBit: 168,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.224.0.0/16",
expectedBit: 224,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.12.0/24",
expectedBit: 12,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.151.0/24",
expectedBit: 151,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "127.168.224.0/24",
expectErr: true,
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
cs := newCIDRSet(clusterCIDR, tc.subNetMaskSize)
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, err := cs.getIndexForCIDR(subnetCIDR)
if err == nil && tc.expectErr {
glog.Errorf("expected error but got null")
continue
}
if err != nil && !tc.expectErr {
glog.Errorf("unexpected error: %v", err)
continue
}
if got != tc.expectedBit {
glog.Errorf("expected %v, but got %v", tc.expectedBit, got)
}
}
}
func TestOccupy(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedUsedBegin int
expectedUsedEnd int
expectErr bool
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/8",
expectedUsedBegin: 0,
expectedUsedEnd: 256,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/2",
expectedUsedBegin: 0,
expectedUsedEnd: 256,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 0,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 32,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 65535,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 256,
expectedUsedEnd: 257,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 15,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 128,
expectedUsedEnd: 128,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 18,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 1024,
expectedUsedEnd: 1031,
expectErr: false,
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
cs := newCIDRSet(clusterCIDR, tc.subNetMaskSize)
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
err = cs.occupy(subnetCIDR)
if err == nil && tc.expectErr {
t.Errorf("expected error but got none")
continue
}
if err != nil && !tc.expectErr {
t.Errorf("unexpected error: %v", err)
continue
}
expectedUsed := big.Int{}
for i := tc.expectedUsedBegin; i <= tc.expectedUsedEnd; i++ {
expectedUsed.SetBit(&expectedUsed, i, 1)
}
if expectedUsed.Cmp(&cs.used) != 0 {
t.Errorf("error")
}
}
}
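
The last TestOccupy case above expects bits 1024 through 1031 to be set. The same begin/end derivation can be done by hand with stdlib arithmetic; the sketch below mirrors (rather than calls) getBeginingAndEndIndices, and note that 127.0.0.0/7 normalizes to the network 126.0.0.0/7.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// index mirrors getIndexForCIDR: XOR out the cluster prefix and shift down to the subnet index.
func index(clusterIP, ip uint32, subNetMaskSize int) uint32 {
	return (clusterIP ^ ip) >> uint32(32-subNetMaskSize)
}

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("127.0.0.0/7") // network 126.0.0.0/7
	_, subnet, _ := net.ParseCIDR("127.0.0.0/15")
	subNetMaskSize := 18

	clusterIP := binary.BigEndian.Uint32(clusterCIDR.IP.To4())
	first := binary.BigEndian.Uint32(subnet.IP.To4())     // 127.0.0.0
	last := first | ^binary.BigEndian.Uint32(subnet.Mask) // 127.1.255.255
	subMask := ^uint32(0) << uint32(32-subNetMaskSize)    // /18 mask

	begin := index(clusterIP, first&subMask, subNetMaskSize)
	end := index(clusterIP, last&subMask, subNetMaskSize)
	fmt.Println(begin, end) // 1024 1031
}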

View file

@@ -0,0 +1,290 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/node"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
)
const (
// Number of Nodes that need to be in the cluster for it to be treated as "large"
LargeClusterThreshold = 20
)
// deletePods will delete all pods from master running on given node, and return true
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
var updateErrList []error
if err != nil {
return remaining, err
}
if len(pods.Items) > 0 {
recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
}
for _, pod := range pods.Items {
// Defensive check, also needed for tests.
if pod.Spec.NodeName != nodeName {
continue
}
// Set reason and message in the pod object.
if _, err = setPodTerminationReason(kubeClient, &pod, nodeName); err != nil {
if errors.IsConflict(err) {
updateErrList = append(updateErrList,
fmt.Errorf("update status failed for pod %q: %v", format.Pod(&pod), err))
continue
}
}
// if the pod has already been marked for deletion, we still count it as a remaining pod.
if pod.DeletionGracePeriodSeconds != nil {
remaining = true
continue
}
// if the pod is managed by a daemonset, ignore it
_, err := daemonStore.GetPodDaemonSets(&pod)
if err == nil { // No error means at least one daemonset was found
continue
}
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err
}
remaining = true
}
if len(updateErrList) > 0 {
return false, utilerrors.NewAggregate(updateErrList)
}
return remaining, nil
}
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == node.NodeUnreachablePodReason {
return pod, nil
}
pod.Status.Reason = node.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
return nil, err
}
return updatedPod, nil
}
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
}
return err
}
// forcefullyDeleteNode immediately deletes the node. The pods on the node are cleaned
// up by the podGC.
func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error {
if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil {
return fmt.Errorf("unable to delete node %q: %v", nodeName, err)
}
return nil
}
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
// that should not be gracefully terminated.
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
return
}
}
// consider only terminating pods
if pod.DeletionTimestamp == nil {
return
}
nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName)
if err != nil {
// this can only happen if the Store.KeyFunc has a problem creating
// a key for the pod. If it happens once, it will happen again so
// don't bother requeuing the pod.
utilruntime.HandleError(err)
return
}
// if there is no such node, do nothing and let the podGC clean it up.
if !found {
return
}
// delete terminating pods that have been scheduled on
// nodes that do not support graceful termination
// TODO(mikedanese): this can be removed when we no longer
// guarantee backwards compatibility of master API to kubelets with
// versions less than 1.1.0
node := nodeObj.(*v1.Node)
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
utilruntime.HandleError(nc.forcefullyDeletePod(pod))
return
}
if gracefulDeletionVersion.GT(v) {
utilruntime.HandleError(nc.forcefullyDeletePod(pod))
return
}
}
// markAllPodsNotReady updates the Ready status of all pods running on the given node from master,
// returning an aggregate error if any update fails.
func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
// Don't set pods to NotReady if the kubelet is running a version that
// doesn't understand how to correct readiness.
// TODO: Remove this check when we no longer guarantee backward compatibility
// with node versions < 1.2.0.
if nodeRunningOutdatedKubelet(node) {
return nil
}
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
errMsg := []string{}
for _, pod := range pods.Items {
// Defensive check, also needed for tests.
if pod.Spec.NodeName != nodeName {
continue
}
for i, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))
}
break
}
}
}
if len(errMsg) == 0 {
return nil
}
return fmt.Errorf("%v", strings.Join(errMsg, "; "))
}
// nodeRunningOutdatedKubelet returns true if the kubeletVersion reported
// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
// Older versions were inflexible and modifying pod.Status directly through
// the apiserver would result in unexpected outcomes.
func nodeRunningOutdatedKubelet(node *v1.Node) bool {
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
return true
}
if podStatusReconciliationVersion.GT(v) {
glog.Infof("Node %v running kubelet at (%v) which is less than the minimum version that allows nodecontroller to mark pods NotReady (%v).", node.Name, v, podStatusReconciliationVersion)
return true
}
return false
}
func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {
instances, ok := cloud.Instances()
if !ok {
return false, fmt.Errorf("%v", ErrCloudInstance)
}
if _, err := instances.ExternalID(nodeName); err != nil {
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, err
}
return true, nil
}
func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeUID),
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: node.UID,
Namespace: "",
}
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}
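
markAllPodsNotReady above flips each pod's Ready condition to false and folds any per-pod update failures into a single aggregate error. A toy, self-contained sketch of that flip-and-aggregate pattern with local stand-in types (updateStatus below is a hypothetical stand-in for the apiserver call, not part of this change):

package main

import (
	"errors"
	"fmt"
	"strings"
)

type condition struct {
	Type   string
	Status string
}

type pod struct {
	Name       string
	Conditions []condition
}

// updateStatus stands in for the apiserver status update; it fails for one pod to show aggregation.
func updateStatus(p *pod) error {
	if p.Name == "bad" {
		return errors.New("conflict")
	}
	return nil
}

func markAllNotReady(pods []*pod) error {
	errMsg := []string{}
	for _, p := range pods {
		for i, cond := range p.Conditions {
			if cond.Type == "Ready" {
				p.Conditions[i].Status = "False"
				if err := updateStatus(p); err != nil {
					errMsg = append(errMsg, fmt.Sprintf("%v: %v", p.Name, err))
				}
				break
			}
		}
	}
	if len(errMsg) == 0 {
		return nil
	}
	return fmt.Errorf("%v", strings.Join(errMsg, "; "))
}

func main() {
	pods := []*pod{
		{Name: "good", Conditions: []condition{{Type: "Ready", Status: "True"}}},
		{Name: "bad", Conditions: []condition{{Type: "Ready", Status: "True"}}},
	}
	fmt.Println(markAllNotReady(pods)) // bad: conflict
}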

19
vendor/k8s.io/kubernetes/pkg/controller/node/doc.go generated vendored Normal file
View file

@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package node contains code for syncing cloud instances with
// node registry
package node // import "k8s.io/kubernetes/pkg/controller/node"

View file

@@ -0,0 +1,77 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
const (
NodeControllerSubsystem = "node_collector"
ZoneHealthStatisticKey = "zone_health"
ZoneSizeKey = "zone_size"
ZoneNoUnhealthyNodesKey = "unhealty_nodes_in_zone"
EvictionsNumberKey = "evictions_number"
)
var (
ZoneHealth = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneHealthStatisticKey,
Help: "Gauge measuring percentage of healty nodes per zone.",
},
[]string{"zone"},
)
ZoneSize = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneSizeKey,
Help: "Gauge measuring number of registered Nodes per zones.",
},
[]string{"zone"},
)
UnhealthyNodes = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneNoUnhealthyNodesKey,
Help: "Gauge measuring number of not Ready Nodes per zones.",
},
[]string{"zone"},
)
EvictionsNumber = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: NodeControllerSubsystem,
Name: EvictionsNumberKey,
Help: "Number of Node evictions that happened since current instance of NodeController started.",
},
[]string{"zone"},
)
)
var registerMetrics sync.Once
func Register() {
registerMetrics.Do(func() {
prometheus.MustRegister(ZoneHealth)
prometheus.MustRegister(ZoneSize)
prometheus.MustRegister(UnhealthyNodes)
prometheus.MustRegister(EvictionsNumber)
})
}
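
A small usage sketch of the collectors above, assuming the standard prometheus/client_golang GaugeVec/CounterVec API; the zone label values are illustrative, and in the real controller they are derived from node labels:

package main

import (
	"k8s.io/kubernetes/pkg/controller/node"
)

func main() {
	// Register the collectors exactly once; Register() guards this with sync.Once.
	node.Register()

	// Record some per-zone values (zone names here are made up for illustration).
	node.ZoneSize.WithLabelValues("us-central1/us-central1-a").Set(42)
	node.ZoneHealth.WithLabelValues("us-central1/us-central1-a").Set(97.5)
	node.UnhealthyNodes.WithLabelValues("us-central1/us-central1-a").Set(1)
	node.EvictionsNumber.WithLabelValues("us-central1/us-central1-a").Inc()
}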

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,282 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"container/heap"
"sync"
"time"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/golang/glog"
)
// TimedValue is a value that should be processed at a designated time.
type TimedValue struct {
Value string
// UID could be anything that helps identify the value
UID interface{}
AddedAt time.Time
ProcessAt time.Time
}
// now is used to test time
var now func() time.Time = time.Now
// TimedQueue is a priority heap where the lowest ProcessAt is at the front of the queue
type TimedQueue []*TimedValue
func (h TimedQueue) Len() int { return len(h) }
func (h TimedQueue) Less(i, j int) bool { return h[i].ProcessAt.Before(h[j].ProcessAt) }
func (h TimedQueue) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *TimedQueue) Push(x interface{}) {
*h = append(*h, x.(*TimedValue))
}
func (h *TimedQueue) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
// A FIFO queue which additionally guarantees that any element can be added only once until
// it is removed.
type UniqueQueue struct {
lock sync.Mutex
queue TimedQueue
set sets.String
}
// Adds a new value to the queue if it wasn't added before, or was explicitly removed by the
// Remove call. Returns true if new value was added.
func (q *UniqueQueue) Add(value TimedValue) bool {
q.lock.Lock()
defer q.lock.Unlock()
if q.set.Has(value.Value) {
return false
}
heap.Push(&q.queue, &value)
q.set.Insert(value.Value)
return true
}
// Replace replaces an existing value in the queue if it already exists, otherwise it does nothing.
// Returns true if the item was found.
func (q *UniqueQueue) Replace(value TimedValue) bool {
q.lock.Lock()
defer q.lock.Unlock()
for i := range q.queue {
if q.queue[i].Value != value.Value {
continue
}
heap.Remove(&q.queue, i)
heap.Push(&q.queue, &value)
return true
}
return false
}
// Removes the value from the queue, but keeps it in the set, so it won't be added a second time.
// Returns true if something was removed.
func (q *UniqueQueue) RemoveFromQueue(value string) bool {
q.lock.Lock()
defer q.lock.Unlock()
if !q.set.Has(value) {
return false
}
for i, val := range q.queue {
if val.Value == value {
heap.Remove(&q.queue, i)
return true
}
}
return false
}
// Removes the value from the queue, so a subsequent Get() call won't return it, and allows the
// value to be added again. If the value is not present, it does nothing and returns false.
func (q *UniqueQueue) Remove(value string) bool {
q.lock.Lock()
defer q.lock.Unlock()
if !q.set.Has(value) {
return false
}
q.set.Delete(value)
for i, val := range q.queue {
if val.Value == value {
heap.Remove(&q.queue, i)
return true
}
}
return true
}
// Get returns the value with the earliest ProcessAt that hasn't been returned yet and removes it from the queue.
func (q *UniqueQueue) Get() (TimedValue, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if len(q.queue) == 0 {
return TimedValue{}, false
}
result := heap.Pop(&q.queue).(*TimedValue)
q.set.Delete(result.Value)
return *result, true
}
// Head returns the value with the earliest ProcessAt that hasn't been returned yet, without removing it.
func (q *UniqueQueue) Head() (TimedValue, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if len(q.queue) == 0 {
return TimedValue{}, false
}
result := q.queue[0]
return *result, true
}
// Clear removes all items from the queue and duplication preventing set.
func (q *UniqueQueue) Clear() {
q.lock.Lock()
defer q.lock.Unlock()
if q.queue.Len() > 0 {
q.queue = make(TimedQueue, 0)
}
if len(q.set) > 0 {
q.set = sets.NewString()
}
}
// RateLimitedTimedQueue is a unique item priority queue ordered by the expected next time
// of execution. It is also rate limited.
type RateLimitedTimedQueue struct {
queue UniqueQueue
limiterLock sync.Mutex
limiter flowcontrol.RateLimiter
}
// Creates new queue which will use given RateLimiter to oversee execution.
func NewRateLimitedTimedQueue(limiter flowcontrol.RateLimiter) *RateLimitedTimedQueue {
return &RateLimitedTimedQueue{
queue: UniqueQueue{
queue: TimedQueue{},
set: sets.NewString(),
},
limiter: limiter,
}
}
// ActionFunc takes a timed value and returns false if the item must be retried, with an optional
// time.Duration if some minimum wait interval should be used.
type ActionFunc func(TimedValue) (bool, time.Duration)
// Try processes the queue. It ends prematurely if the RateLimiter forbids an action or if the head
// of the queue is not yet due. Each value is processed once if fn returns true; otherwise it is
// requeued to be retried after the returned wait duration. The same value is processed only once unless
// Remove is explicitly called on it (it's done by the cancelPodEviction function in NodeController
// when Node becomes Ready again)
// TODO: figure out a good way to do garbage collection for all Nodes that were removed from
// the cluster.
func (q *RateLimitedTimedQueue) Try(fn ActionFunc) {
val, ok := q.queue.Head()
q.limiterLock.Lock()
defer q.limiterLock.Unlock()
for ok {
// rate limit the queue checking
if !q.limiter.TryAccept() {
glog.V(10).Infof("Try rate limited for value: %v", val)
// Try again later
break
}
now := now()
if now.Before(val.ProcessAt) {
break
}
if ok, wait := fn(val); !ok {
val.ProcessAt = now.Add(wait + 1)
q.queue.Replace(val)
} else {
q.queue.RemoveFromQueue(val.Value)
}
val, ok = q.queue.Head()
}
}
// Adds value to the queue to be processed. Won't add the same value (comparison by value) a second time
// if it was already added and not removed.
func (q *RateLimitedTimedQueue) Add(value string, uid interface{}) bool {
now := now()
return q.queue.Add(TimedValue{
Value: value,
UID: uid,
AddedAt: now,
ProcessAt: now,
})
}
// Removes Node from the Evictor. The Node won't be processed until added again.
func (q *RateLimitedTimedQueue) Remove(value string) bool {
return q.queue.Remove(value)
}
// Removes all items from the queue
func (q *RateLimitedTimedQueue) Clear() {
q.queue.Clear()
}
// SwapLimiter safely swaps the current limiter for this queue with the passed one if their QPS differs.
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
q.limiterLock.Lock()
defer q.limiterLock.Unlock()
if q.limiter.QPS() == newQPS {
return
}
var newLimiter flowcontrol.RateLimiter
if newQPS <= 0 {
newLimiter = flowcontrol.NewFakeNeverRateLimiter()
} else {
newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, evictionRateLimiterBurst)
}
// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
// - saturation (percentage of used tokens)
// - number of used tokens
// - number of available tokens
// - something else
for q.limiter.Saturation() > newLimiter.Saturation() {
// Check if we're not using fake limiter
previousSaturation := newLimiter.Saturation()
newLimiter.TryAccept()
// It's a fake limiter
if newLimiter.Saturation() == previousSaturation {
break
}
}
q.limiter.Stop()
q.limiter = newLimiter
}
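
For orientation, the basic call pattern for the queue above looks roughly like the sketch below (mirroring how the tests later in this change drive it; in the real controller the evictors are created and drained by NodeController):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/controller/node"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Allow roughly one eviction per second with a burst of one.
	evictor := node.NewRateLimitedTimedQueue(flowcontrol.NewTokenBucketRateLimiter(1, 1))

	evictor.Add("node-a", "uid-a")
	evictor.Add("node-b", "uid-b")

	// Try drains whatever is currently due; returning false requeues the value with the given wait.
	evictor.Try(func(v node.TimedValue) (bool, time.Duration) {
		fmt.Println("evicting", v.Value)
		return true, 0
	})
}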

View file

@@ -0,0 +1,334 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
)
func CheckQueueEq(lhs []string, rhs TimedQueue) bool {
for i := 0; i < len(lhs); i++ {
if rhs[i].Value != lhs[i] {
return false
}
}
return true
}
func CheckSetEq(lhs, rhs sets.String) bool {
return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
func TestAddNode(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
queuePattern := []string{"first", "second", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern := sets.NewString("first", "second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
}
func TestDelNode(t *testing.T) {
defer func() { now = time.Now }()
var tick int64
now = func() time.Time {
t := time.Unix(tick, 0)
tick++
return t
}
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("first")
queuePattern := []string{"second", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern := sets.NewString("second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
queuePattern = []string{"first", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern = sets.NewString("first", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("third")
queuePattern = []string{"first", "second"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern = sets.NewString("first", "second")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
}
func TestTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
deletedMap := sets.NewString()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
deletedMap.Insert(value.Value)
return true, 0
})
setPattern := sets.NewString("first", "third")
if len(deletedMap) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, deletedMap) {
t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
}
}
func TestTryOrdering(t *testing.T) {
defer func() { now = time.Now }()
current := time.Unix(0, 0)
delay := 0
// the current time is incremented by 1ms every time now is invoked
now = func() time.Time {
if delay > 0 {
delay--
} else {
current = current.Add(time.Millisecond)
}
t.Logf("time %d", current.UnixNano())
return current
}
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
order := []string{}
count := 0
hasQueued := false
evictor.Try(func(value TimedValue) (bool, time.Duration) {
count++
t.Logf("eviction %d", count)
if value.ProcessAt.IsZero() {
t.Fatalf("processAt should not be zero")
}
switch value.Value {
case "first":
if !value.AddedAt.Equal(time.Unix(0, time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
case "second":
if !value.AddedAt.Equal(time.Unix(0, 2*time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
if hasQueued {
if !value.ProcessAt.Equal(time.Unix(0, 6*time.Millisecond.Nanoseconds())) {
t.Fatalf("process time for %s is %d", value.Value, value.ProcessAt)
}
break
}
hasQueued = true
delay = 1
t.Logf("going to delay")
return false, 2 * time.Millisecond
case "third":
if !value.AddedAt.Equal(time.Unix(0, 3*time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
}
order = append(order, value.Value)
return true, 0
})
if !reflect.DeepEqual(order, []string{"first", "third"}) {
t.Fatalf("order was wrong: %v", order)
}
if count != 3 {
t.Fatalf("unexpected iterations: %d", count)
}
}
func TestTryRemovingWhileTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
processing := make(chan struct{})
wait := make(chan struct{})
order := []string{}
count := 0
queued := false
// while the Try function is processing "second", remove it from the queue
// we should not see "second" retried.
go func() {
<-processing
evictor.Remove("second")
close(wait)
}()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
count++
if value.AddedAt.IsZero() {
t.Fatalf("added should not be zero")
}
if value.ProcessAt.IsZero() {
t.Fatalf("next should not be zero")
}
if !queued && value.Value == "second" {
queued = true
close(processing)
<-wait
return false, time.Millisecond
}
order = append(order, value.Value)
return true, 0
})
if !reflect.DeepEqual(order, []string{"first", "third"}) {
t.Fatalf("order was wrong: %v", order)
}
if count != 3 {
t.Fatalf("unexpected iterations: %d", count)
}
}
func TestClear(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Clear()
if len(evictor.queue.queue) != 0 {
t.Fatalf("Clear should remove all elements from the queue.")
}
}
func TestSwapLimiter(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
fakeAlways := flowcontrol.NewFakeAlwaysRateLimiter()
qps := evictor.limiter.QPS()
if qps != fakeAlways.QPS() {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, fakeAlways.QPS())
}
evictor.SwapLimiter(0)
qps = evictor.limiter.QPS()
fakeNever := flowcontrol.NewFakeNeverRateLimiter()
if qps != fakeNever.QPS() {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, fakeNever.QPS())
}
createdQPS := float32(5.5)
evictor.SwapLimiter(createdQPS)
qps = evictor.limiter.QPS()
if qps != createdQPS {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, createdQPS)
}
}
func TestAddAfterTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
deletedMap := sets.NewString()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
deletedMap.Insert(value.Value)
return true, 0
})
setPattern := sets.NewString("first", "third")
if len(deletedMap) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, deletedMap) {
t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
}
evictor.Add("first", "11111")
evictor.Try(func(value TimedValue) (bool, time.Duration) {
t.Errorf("We shouldn't process the same value if the explicit remove wasn't called.")
return true, 0
})
}

View file

@@ -0,0 +1,332 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"errors"
"fmt"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/watch"
)
// FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
// allows test cases to have fine-grained control over mock behaviors. We also need
// PodsInterface and PodInterface to test list & delete pods, which is implemented in
// the embedded client.Fake field.
type FakeNodeHandler struct {
*fake.Clientset
// Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *v1.Node) bool
Existing []*v1.Node
// Output
CreatedNodes []*v1.Node
DeletedNodes []*v1.Node
UpdatedNodes []*v1.Node
UpdatedNodeStatuses []*v1.Node
RequestCount int
// Synchronization
lock sync.Mutex
deleteWaitChan chan struct{}
}
type FakeLegacyHandler struct {
v1core.CoreV1Interface
n *FakeNodeHandler
}
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes {
updatedNodesCopy[i] = ptr
}
return updatedNodesCopy
}
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
}
func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n
}
func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
for _, n := range m.Existing {
if n.Name == node.Name {
return nil, apierrors.NewAlreadyExists(api.Resource("nodes"), node.Name)
}
}
if m.CreateHook == nil || m.CreateHook(m, node) {
nodeCopy := *node
m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
return node, nil
} else {
return nil, errors.New("Create error.")
}
}
func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
for i := range m.UpdatedNodes {
if m.UpdatedNodes[i].Name == name {
nodeCopy := *m.UpdatedNodes[i]
return &nodeCopy, nil
}
}
for i := range m.Existing {
if m.Existing[i].Name == name {
nodeCopy := *m.Existing[i]
return &nodeCopy, nil
}
}
return nil, nil
}
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
var nodes []*v1.Node
for i := 0; i < len(m.UpdatedNodes); i++ {
if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
nodes = append(nodes, m.UpdatedNodes[i])
}
}
for i := 0; i < len(m.Existing); i++ {
if !contains(m.Existing[i], m.DeletedNodes) && !contains(m.Existing[i], nodes) {
nodes = append(nodes, m.Existing[i])
}
}
for i := 0; i < len(m.CreatedNodes); i++ {
if !contains(m.CreatedNodes[i], m.DeletedNodes) && !contains(m.CreatedNodes[i], nodes) {
nodes = append(nodes, m.CreatedNodes[i])
}
}
nodeList := &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node)
}
return nodeList, nil
}
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
if m.deleteWaitChan != nil {
m.deleteWaitChan <- struct{}{}
}
m.lock.Unlock()
}()
m.DeletedNodes = append(m.DeletedNodes, newNode(id))
return nil
}
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil
}
func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
nodeCopy := *node
for i, updateNode := range m.UpdatedNodes {
if updateNode.Name == nodeCopy.Name {
m.UpdatedNodes[i] = &nodeCopy
return node, nil
}
}
m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)
return node, nil
}
func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
nodeCopy := *node
m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
return node, nil
}
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++
return &v1.Node{}, nil
}
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}
// FakeRecorder is used as a fake during testing.
type FakeRecorder struct {
source v1.EventSource
events []*v1.Event
clock clock.Clock
}
func (f *FakeRecorder) Event(obj runtime.Object, eventtype, reason, message string) {
f.generateEvent(obj, metav1.Now(), eventtype, reason, message)
}
func (f *FakeRecorder) Eventf(obj runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
f.Event(obj, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) {
ref, err := v1.GetReference(obj)
if err != nil {
return
}
event := f.makeEvent(ref, eventtype, reason, message)
event.Source = f.source
if f.events != nil {
fmt.Println("write event")
f.events = append(f.events, event)
}
}
func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
fmt.Println("make event")
t := metav1.Time{Time: f.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = v1.NamespaceDefault
}
return &v1.Event{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
},
InvolvedObject: *ref,
Reason: reason,
Message: message,
FirstTimestamp: t,
LastTimestamp: t,
Count: 1,
Type: eventtype,
}
}
func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: v1.EventSource{Component: "nodeControllerTest"},
events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()),
}
}
func newNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
ExternalID: name,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
}
}
func newPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: v1.PodSpec{
NodeName: host,
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
}
return pod
}
func contains(node *v1.Node, nodes []*v1.Node) bool {
for i := 0; i < len(nodes); i++ {
if node.Name == nodes[i].Name {
return true
}
}
return false
}
// Returns list of zones for all Nodes stored in FakeNodeHandler
func getZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node))
}
return zones.List()
}
func createZoneID(region, zone string) string {
return region + ":\x00:" + zone
}

79
vendor/k8s.io/kubernetes/pkg/controller/petset/BUILD generated vendored Normal file
View file

@@ -0,0 +1,79 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"fakes.go",
"identity_mappers.go",
"iterator.go",
"pet.go",
"pet_set.go",
"pet_set_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:gopkg.in/inf.v0",
],
)
go_test(
name = "go_default_test",
srcs = [
"identity_mappers_test.go",
"iterator_test.go",
"pet_set_test.go",
"pet_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/apps/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/testing:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/controller/petset/fakes.go generated vendored Normal file

@ -0,0 +1,327 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"time"
inf "gopkg.in/inf.v0"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
)
func dec(i int64, exponent int) *inf.Dec {
return inf.NewDec(i, inf.Scale(-exponent))
}
func newPVC(name string) v1.PersistentVolumeClaim {
return v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
},
},
},
}
}
func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount) *apps.StatefulSet {
mounts := append(petMounts, podMounts...)
claims := []v1.PersistentVolumeClaim{}
for _, m := range petMounts {
claims = append(claims, newPVC(m.Name))
}
vols := []v1.Volume{}
for _, m := range podMounts {
vols = append(vols, v1.Volume{
Name: m.Name,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name),
},
},
})
}
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
},
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
UID: types.UID("test"),
},
Spec: apps.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
VolumeMounts: mounts,
},
},
Volumes: vols,
},
},
VolumeClaimTemplates: claims,
ServiceName: "governingsvc",
},
}
}
func runningPod(ns, name string) *v1.Pod {
p := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
p.Namespace = ns
p.Name = name
return p
}
func newPodList(ps *apps.StatefulSet, num int) []*v1.Pod {
// knownPods are pods in the system
knownPods := []*v1.Pod{}
for i := 0; i < num; i++ {
k, _ := newPCB(fmt.Sprintf("%v", i), ps)
knownPods = append(knownPods, k.pod)
}
return knownPods
}
func newStatefulSet(replicas int) *apps.StatefulSet {
petMounts := []v1.VolumeMount{
{Name: "datadir", MountPath: "/tmp/zookeeper"},
}
podMounts := []v1.VolumeMount{
{Name: "home", MountPath: "/home"},
}
return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts)
}
func checkPodForMount(pod *v1.Pod, mountName string) error {
for _, c := range pod.Spec.Containers {
for _, v := range c.VolumeMounts {
if v.Name == mountName {
return nil
}
}
}
return fmt.Errorf("Found volume but no associated mount %v in pod %v", mountName, pod.Name)
}
func newFakePetClient() *fakePetClient {
return &fakePetClient{
pets: []*pcb{},
claims: []v1.PersistentVolumeClaim{},
recorder: &record.FakeRecorder{},
petHealthChecker: &defaultPetHealthChecker{},
}
}
type fakePetClient struct {
pets []*pcb
claims []v1.PersistentVolumeClaim
petsCreated int
petsDeleted int
claimsCreated int
claimsDeleted int
recorder record.EventRecorder
petHealthChecker
}
// Delete fakes pet client deletion.
func (f *fakePetClient) Delete(p *pcb) error {
pets := []*pcb{}
found := false
for i, pet := range f.pets {
if p.pod.Name == pet.pod.Name {
found = true
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
continue
}
pets = append(pets, f.pets[i])
}
if !found {
// TODO: Return proper not found error
return fmt.Errorf("Delete failed: pod %v doesn't exist", p.pod.Name)
}
f.pets = pets
f.petsDeleted++
return nil
}
// Get fakes getting pets.
func (f *fakePetClient) Get(p *pcb) (*pcb, bool, error) {
for i, pet := range f.pets {
if p.pod.Name == pet.pod.Name {
return f.pets[i], true, nil
}
}
return nil, false, nil
}
// Create fakes pet creation.
func (f *fakePetClient) Create(p *pcb) error {
for _, pet := range f.pets {
if p.pod.Name == pet.pod.Name {
return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name)
}
}
f.recorder.Eventf(p.parent, v1.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
f.pets = append(f.pets, p)
f.petsCreated++
return nil
}
// Update fakes pet updates.
func (f *fakePetClient) Update(expected, wanted *pcb) error {
found := false
pets := []*pcb{}
for i, pet := range f.pets {
if wanted.pod.Name == pet.pod.Name {
f.pets[i].pod.Annotations[apipod.PodHostnameAnnotation] = wanted.pod.Annotations[apipod.PodHostnameAnnotation]
f.pets[i].pod.Annotations[apipod.PodSubdomainAnnotation] = wanted.pod.Annotations[apipod.PodSubdomainAnnotation]
f.pets[i].pod.Spec = wanted.pod.Spec
found = true
}
pets = append(pets, f.pets[i])
}
f.pets = pets
if !found {
return fmt.Errorf("Cannot update pod %v not found", wanted.pod.Name)
}
// TODO: Delete pvcs/volumes that are in wanted but not in expected.
return nil
}
func (f *fakePetClient) getPodList() []*v1.Pod {
p := []*v1.Pod{}
for i, pet := range f.pets {
if pet.pod == nil {
continue
}
p = append(p, f.pets[i].pod)
}
return p
}
func (f *fakePetClient) deletePetAtIndex(index int) {
p := []*pcb{}
for i := range f.pets {
if i != index {
p = append(p, f.pets[i])
}
}
f.pets = p
}
func (f *fakePetClient) setHealthy(index int) error {
if len(f.pets) <= index {
return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
}
f.pets[index].pod.Status.Phase = v1.PodRunning
f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true"
f.pets[index].pod.Status.Conditions = []v1.PodCondition{
{Type: v1.PodReady, Status: v1.ConditionTrue},
}
return nil
}
// isHealthy is a convenience wrapper around the default health checker.
// It simply delegates to the embedded default pet health checker.
func (f *fakePetClient) isHealthy(pod *v1.Pod) bool {
if f.petHealthChecker.isHealthy(pod) {
return true
}
return false
}
func (f *fakePetClient) setDeletionTimestamp(index int) error {
if len(f.pets) <= index {
return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
}
f.pets[index].pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
return nil
}
// SyncPVCs fakes pvc syncing.
func (f *fakePetClient) SyncPVCs(pet *pcb) error {
v := pet.pvcs
updateClaims := map[string]v1.PersistentVolumeClaim{}
for i, update := range v {
updateClaims[update.Name] = v[i]
}
claimList := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims {
if update, ok := updateClaims[existing.Name]; ok {
claimList = append(claimList, update)
delete(updateClaims, existing.Name)
} else {
claimList = append(claimList, f.claims[i])
}
}
for _, remaining := range updateClaims {
claimList = append(claimList, remaining)
f.claimsCreated++
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
}
f.claims = claimList
return nil
}
// DeletePVCs fakes pvc deletion.
func (f *fakePetClient) DeletePVCs(pet *pcb) error {
claimsToDelete := pet.pvcs
deleteClaimNames := sets.NewString()
for _, c := range claimsToDelete {
deleteClaimNames.Insert(c.Name)
}
pvcs := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims {
if deleteClaimNames.Has(existing.Name) {
deleteClaimNames.Delete(existing.Name)
f.claimsDeleted++
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
continue
}
pvcs = append(pvcs, f.claims[i])
}
f.claims = pvcs
if deleteClaimNames.Len() != 0 {
return fmt.Errorf("Claims %+v don't exist. Failed deletion.", deleteClaimNames)
}
return nil
}

vendor/k8s.io/kubernetes/pkg/controller/petset/identity_mappers.go generated vendored Normal file

@ -0,0 +1,247 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"crypto/md5"
"fmt"
"sort"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
podapi "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/util/sets"
)
// identityMapper is an interface for assigning identities to a pet.
// All existing identity mappers just append "-(index)" to the statefulset name to
// generate a unique identity. This is used in claims/DNS/hostname/petname
// etc. There's a more elegant way to achieve this mapping, but we're
// taking the simplest route till we have data on whether users will need
// more customization.
// Note that running a single identity mapper is not guaranteed to give
// your pet a unique identity. You must run them all. Order doesn't matter.
type identityMapper interface {
// SetIdentity takes an id and assigns the given pet an identity based
// on the stateful set spec. The id must be unique amongst members of the
// stateful set.
SetIdentity(id string, pet *v1.Pod)
// Identity returns the identity of the pet.
Identity(pod *v1.Pod) string
}
func newIdentityMappers(ps *apps.StatefulSet) []identityMapper {
return []identityMapper{
&NameIdentityMapper{ps},
&NetworkIdentityMapper{ps},
&VolumeIdentityMapper{ps},
}
}
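// Editorial sketch, not part of the vendored source: running every mapper from
// newIdentityMappers gives one pet its full identity (name, network and storage);
// running only a subset leaves the identity incomplete, which is why the slice is
// always iterated in full. Assumes the newStatefulSet fixture from fakes.go in
// this package; the set "foo" uses service "governingsvc" in namespace "default".
func exampleApplyIdentityMappers() {
	ps := newStatefulSet(1)
	pet := &v1.Pod{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}}}
	for _, im := range newIdentityMappers(ps) {
		im.SetIdentity("0", pet)
	}
	// Prints "default/foo-0 foo-0": the name identity plus the hostname annotation
	// set by the network identity mapper.
	fmt.Printf("%v/%v %v\n", pet.Namespace, pet.Name, pet.Annotations[podapi.PodHostnameAnnotation])
}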
// NetworkIdentityMapper assigns network identity to pets.
type NetworkIdentityMapper struct {
ps *apps.StatefulSet
}
// SetIdentity sets network identity on the pet.
func (n *NetworkIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName
return
}
// Identity returns the network identity of the pet.
func (n *NetworkIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet)
}
// String is a string function for the network identity of the pet.
func (n *NetworkIdentityMapper) String(pet *v1.Pod) string {
hostname := pet.Annotations[podapi.PodHostnameAnnotation]
subdomain := pet.Annotations[podapi.PodSubdomainAnnotation]
return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".")
}
// VolumeIdentityMapper assigns storage identity to pets.
type VolumeIdentityMapper struct {
ps *apps.StatefulSet
}
// SetIdentity sets storage identity on the pet.
func (v *VolumeIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
petVolumes := []v1.Volume{}
petClaims := v.GetClaims(id)
// These volumes will all go down with the pod. If a name matches one of
// the claims in the stateful set, it gets clobbered.
podVolumes := map[string]v1.Volume{}
for _, podVol := range pet.Spec.Volumes {
podVolumes[podVol.Name] = podVol
}
// Insert claims for the idempotent statefulset volumes
for name, claim := range petClaims {
// Volumes on a pet for which there are no associated claims on the
// statefulset are pod local, and die with the pod.
podVol, ok := podVolumes[name]
if ok {
// TODO: Validate and reject this.
glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name)
}
newVol := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
// TODO: Use source definition to set this value when we have one.
ReadOnly: false,
},
},
}
petVolumes = append(petVolumes, newVol)
}
// Transfer any ephemeral pod volumes
for name, vol := range podVolumes {
if _, ok := petClaims[name]; !ok {
petVolumes = append(petVolumes, vol)
}
}
pet.Spec.Volumes = petVolumes
return
}
// Identity returns the storage identity of the pet.
func (v *VolumeIdentityMapper) Identity(pet *v1.Pod) string {
// TODO: Make this a hash?
return v.String(pet)
}
// String is a string function for the storage identity of the pet.
func (v *VolumeIdentityMapper) String(pet *v1.Pod) string {
ids := []string{}
petVols := sets.NewString()
for _, petVol := range v.ps.Spec.VolumeClaimTemplates {
petVols.Insert(petVol.Name)
}
for _, podVol := range pet.Spec.Volumes {
// Volumes on a pet for which there are no associated claims on the
// statefulset are pod local, and die with the pod.
if !petVols.Has(podVol.Name) {
continue
}
if podVol.VolumeSource.PersistentVolumeClaim == nil {
// TODO: Is this a part of the identity?
ids = append(ids, fmt.Sprintf("%v:None", podVol.Name))
continue
}
ids = append(ids, fmt.Sprintf("%v:%v", podVol.Name, podVol.VolumeSource.PersistentVolumeClaim.ClaimName))
}
sort.Strings(ids)
return strings.Join(ids, "")
}
// GetClaims returns the volume claims associated with the given id.
// The claims belong to the statefulset. The id should be unique within a statefulset.
func (v *VolumeIdentityMapper) GetClaims(id string) map[string]v1.PersistentVolumeClaim {
petClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range v.ps.Spec.VolumeClaimTemplates {
claim := pvc
// TODO: Name length checking in validation.
claim.Name = fmt.Sprintf("%v-%v-%v", claim.Name, v.ps.Name, id)
claim.Namespace = v.ps.Namespace
claim.Labels = v.ps.Spec.Selector.MatchLabels
// TODO: We're assuming that the claim template has a volume QoS key, eg:
// volume.alpha.kubernetes.io/storage-class: anything
petClaims[pvc.Name] = claim
}
return petClaims
}
// GetClaimsForPet returns the pvcs for the given pet.
func (v *VolumeIdentityMapper) GetClaimsForPet(pet *v1.Pod) []v1.PersistentVolumeClaim {
// Strip out the "-(index)" from the pet name and use it to generate
// claim names.
id := strings.Split(pet.Name, "-")
petID := id[len(id)-1]
pvcs := []v1.PersistentVolumeClaim{}
for _, pvc := range v.GetClaims(petID) {
pvcs = append(pvcs, pvc)
}
return pvcs
}
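// Editorial sketch, not part of the vendored source: for a set named "foo" with a
// "datadir" volume claim template, GetClaims("0") produces a claim named
// "datadir-foo-0" in the set's namespace, and GetClaimsForPet recovers the same
// claim from a pet named "foo-0". Assumes the newStatefulSet fixture from fakes.go.
func exampleVolumeClaimNaming() {
	ps := newStatefulSet(1)
	mapper := &VolumeIdentityMapper{ps}
	for template, claim := range mapper.GetClaims("0") {
		// Prints "datadir -> default/datadir-foo-0".
		fmt.Printf("%v -> %v/%v\n", template, claim.Namespace, claim.Name)
	}
}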
// NameIdentityMapper assigns names to pets.
// It also puts the pet in the same namespace as the parent.
type NameIdentityMapper struct {
ps *apps.StatefulSet
}
// SetIdentity sets the pet namespace and name.
func (n *NameIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Namespace = n.ps.Namespace
return
}
// Identity returns the name identity of the pet.
func (n *NameIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet)
}
// String is a string function for the name identity of the pet.
func (n *NameIdentityMapper) String(pet *v1.Pod) string {
return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name)
}
// identityHash computes a hash of the pet by running all the above identity
// mappers.
func identityHash(ps *apps.StatefulSet, pet *v1.Pod) string {
id := ""
for _, idMapper := range newIdentityMappers(ps) {
id += idMapper.Identity(pet)
}
return fmt.Sprintf("%x", md5.Sum([]byte(id)))
}
// copyPetID gives the realPet the same identity as the expectedPet.
// Note that this is *not* a literal copy, but a copy of the fields that
// contribute to the pet's identity. The returned boolean 'needsUpdate' will
// be false if the realPet already has the same identity as the expectedPet.
func copyPetID(realPet, expectedPet *pcb) (pod v1.Pod, needsUpdate bool, err error) {
if realPet.pod == nil || expectedPet.pod == nil {
return pod, false, fmt.Errorf("Need a valid to and from pet for copy")
}
if realPet.parent.UID != expectedPet.parent.UID {
return pod, false, fmt.Errorf("Cannot copy pets with different parents")
}
ps := realPet.parent
if identityHash(ps, realPet.pod) == identityHash(ps, expectedPet.pod) {
return *realPet.pod, false, nil
}
copyPod := *realPet.pod
// This is the easiest way to give an identity to a pod. It won't work
// when we stop using names for id.
for _, idMapper := range newIdentityMappers(ps) {
idMapper.SetIdentity(expectedPet.id, &copyPod)
}
return copyPod, true, nil
}

vendor/k8s.io/kubernetes/pkg/controller/petset/identity_mappers_test.go generated vendored Normal file

@ -0,0 +1,180 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"reflect"
"strings"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
)
func TestPetIDName(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
for i := 0; i < replicas; i++ {
petName := fmt.Sprintf("%v-%d", ps.Name, i)
pcb, err := newPCB(fmt.Sprintf("%d", i), ps)
if err != nil {
t.Fatalf("Failed to generate pet %v", err)
}
pod := pcb.pod
if pod.Name != petName || pod.Namespace != ps.Namespace {
t.Errorf("Wrong name identity, expected %v", pcb.pod.Name)
}
}
}
func TestPetIDDNS(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
for i := 0; i < replicas; i++ {
petName := fmt.Sprintf("%v-%d", ps.Name, i)
petSubdomain := ps.Spec.ServiceName
pcb, err := newPCB(fmt.Sprintf("%d", i), ps)
pod := pcb.pod
if err != nil {
t.Fatalf("Failed to generate pet %v", err)
}
if hostname, ok := pod.Annotations[apipod.PodHostnameAnnotation]; !ok || hostname != petName {
t.Errorf("Wrong hostname: %v", hostname)
}
// TODO: Check this against the governing service.
if subdomain, ok := pod.Annotations[apipod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain {
t.Errorf("Wrong subdomain: %v", subdomain)
}
}
}
func TestPetIDVolume(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
for i := 0; i < replicas; i++ {
pcb, err := newPCB(fmt.Sprintf("%d", i), ps)
if err != nil {
t.Fatalf("Failed to generate pet %v", err)
}
pod := pcb.pod
petName := fmt.Sprintf("%v-%d", ps.Name, i)
claimName := fmt.Sprintf("datadir-%v", petName)
for _, v := range pod.Spec.Volumes {
switch v.Name {
case "datadir":
c := v.VolumeSource.PersistentVolumeClaim
if c == nil || c.ClaimName != claimName {
t.Fatalf("Unexpected claim %v", c)
}
if err := checkPodForMount(pod, "datadir"); err != nil {
t.Errorf("Expected pod mount: %v", err)
}
case "home":
h := v.VolumeSource.HostPath
if h == nil || h.Path != "/tmp/home" {
t.Errorf("Unexpected modification to hostpath, expected /tmp/home got %+v", h)
}
default:
t.Errorf("Unexpected volume %v", v.Name)
}
}
}
// TODO: Check volume mounts.
}
func TestPetIDVolumeClaims(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
for i := 0; i < replicas; i++ {
pcb, err := newPCB(fmt.Sprintf("%v", i), ps)
if err != nil {
t.Fatalf("Failed to generate pet %v", err)
}
pvcs := pcb.pvcs
petName := fmt.Sprintf("%v-%d", ps.Name, i)
claimName := fmt.Sprintf("datadir-%v", petName)
if len(pvcs) != 1 || pvcs[0].Name != claimName {
t.Errorf("Wrong pvc expected %v got %v", claimName, pvcs[0].Name)
}
}
}
func TestPetIDCrossAssignment(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
nameMapper := &NameIdentityMapper{ps}
volumeMapper := &VolumeIdentityMapper{ps}
networkMapper := &NetworkIdentityMapper{ps}
// Check that the name is consistent across identity.
for i := 0; i < replicas; i++ {
pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
p := pet.pod
name := strings.Split(nameMapper.Identity(p), "/")[1]
network := networkMapper.Identity(p)
volume := volumeMapper.Identity(p)
petVolume := strings.Split(volume, ":")[1]
if petVolume != fmt.Sprintf("datadir-%v", name) {
t.Errorf("Unexpected pet volume name %v, expected %v", petVolume, name)
}
if network != fmt.Sprintf("%v.%v.%v", name, ps.Spec.ServiceName, ps.Namespace) {
t.Errorf("Unexpected pet network ID %v, expected %v", network, name)
}
t.Logf("[%v] volume: %+v, network: %+v, name: %+v", i, volume, network, name)
}
}
func TestPetIDReset(t *testing.T) {
replicas := 2
ps := newStatefulSet(replicas)
firstPCB, err := newPCB("1", ps)
secondPCB, err := newPCB("2", ps)
if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) {
t.Fatalf("Failed to generate uniquey identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec)
}
userAdded := v1.Volume{
Name: "test",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
}
firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded)
pod, needsUpdate, err := copyPetID(firstPCB, secondPCB)
if err != nil {
t.Errorf("%v", err)
}
if !needsUpdate {
t.Errorf("expected update since identity of %v was reset", secondPCB.pod.Name)
}
if identityHash(ps, &pod) != identityHash(ps, secondPCB.pod) {
t.Errorf("Failed to copy identity for pod %v -> %v", firstPCB.pod.Name, secondPCB.pod.Name)
}
foundVol := false
for _, v := range pod.Spec.Volumes {
if reflect.DeepEqual(v, userAdded) {
foundVol = true
break
}
}
if !foundVol {
t.Errorf("User added volume was corrupted by reset action.")
}
}

vendor/k8s.io/kubernetes/pkg/controller/petset/iterator.go generated vendored Normal file

@ -0,0 +1,163 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller"
)
// newPCB generates a new PCB using the id string as a unique qualifier
func newPCB(id string, ps *apps.StatefulSet) (*pcb, error) {
petPod, err := controller.GetPodFromTemplate(&ps.Spec.Template, ps, nil)
if err != nil {
return nil, err
}
for _, im := range newIdentityMappers(ps) {
im.SetIdentity(id, petPod)
}
petPVCs := []v1.PersistentVolumeClaim{}
vMapper := &VolumeIdentityMapper{ps}
for _, c := range vMapper.GetClaims(id) {
petPVCs = append(petPVCs, c)
}
// TODO: Replace id field with IdentityHash, since id is more than just an index.
return &pcb{pod: petPod, pvcs: petPVCs, id: id, parent: ps}, nil
}
// petQueue is a custom data structure that resembles a queue of pets.
type petQueue struct {
pets []*pcb
idMapper identityMapper
}
// enqueue enqueues the given pet, evicting any pets with the same id
func (pt *petQueue) enqueue(p *pcb) {
if p == nil {
pt.pets = append(pt.pets, nil)
return
}
// Pop an existing pet from the known list, append the new pet to the end.
petList := []*pcb{}
petID := pt.idMapper.Identity(p.pod)
for i := range pt.pets {
if petID != pt.idMapper.Identity(pt.pets[i].pod) {
petList = append(petList, pt.pets[i])
}
}
pt.pets = petList
p.event = syncPet
pt.pets = append(pt.pets, p)
}
// dequeue returns the last element of the queue
func (pt *petQueue) dequeue() *pcb {
if pt.empty() {
glog.Warningf("Dequeue invoked on an empty queue")
return nil
}
l := len(pt.pets) - 1
pet := pt.pets[l]
pt.pets = pt.pets[:l]
return pet
}
// empty returns true if the pet queue is empty.
func (pt *petQueue) empty() bool {
return len(pt.pets) == 0
}
// NewPetQueue returns a queue for tracking pets
func NewPetQueue(ps *apps.StatefulSet, podList []*v1.Pod) *petQueue {
pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}}
// Seed the queue with existing pets. Assume all pets are scheduled for
// deletion, enqueuing a pet will "undelete" it. We always want to delete
// from the higher ids, so sort by creation timestamp.
sort.Sort(PodsByCreationTimestamp(podList))
vMapper := VolumeIdentityMapper{ps}
for i := range podList {
pod := podList[i]
pt.pets = append(pt.pets, &pcb{pod: pod, pvcs: vMapper.GetClaimsForPet(pod), parent: ps, event: deletePet, id: fmt.Sprintf("%v", i)})
}
return &pt
}
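// Editorial sketch, not part of the vendored source: pets seeded from the pod list
// start out marked for deletion; enqueueing a replacement pcb with the same name
// identity "undeletes" it. Draining the queue for a set scaled down from 3 pods to
// 1 replica therefore yields one syncPet event (foo-0) and two deletePet events.
// Assumes the newStatefulSet and newPodList fixtures from fakes.go.
func examplePetQueueScaleDown() {
	ps := newStatefulSet(1)
	q := NewPetQueue(ps, newPodList(ps, 3))
	if pet, err := newPCB("0", ps); err == nil {
		q.enqueue(pet)
	}
	for !q.empty() {
		p := q.dequeue()
		fmt.Printf("%v -> %v\n", p.pod.Name, p.event) // e.g. "foo-0 -> sync", then "foo-2 -> delete", "foo-1 -> delete"
	}
}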
// statefulSetIterator implements a simple iterator over pets in the given statefulset.
type statefulSetIterator struct {
// ps is the statefulset for this iterator.
ps *apps.StatefulSet
// queue contains the elements to iterate over.
queue *petQueue
// errs is a list because we always want the iterator to drain.
errs []error
// petCount is the number of pets iterated over.
petCount int32
}
// Next returns true for as long as there are elements in the underlying queue.
func (pi *statefulSetIterator) Next() bool {
var pet *pcb
var err error
if pi.petCount < *(pi.ps.Spec.Replicas) {
pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps)
if err != nil {
pi.errs = append(pi.errs, err)
// Don't stop iterating over the set on errors. Caller handles nil.
pet = nil
}
pi.queue.enqueue(pet)
pi.petCount++
}
// Keep the iterator running till the queue of pets has been drained.
return !pi.queue.empty()
}
// Value dequeues an element from the queue.
func (pi *statefulSetIterator) Value() *pcb {
return pi.queue.dequeue()
}
// NewStatefulSetIterator returns a new iterator. All pods in the given podList
// are used to seed the queue of the iterator.
func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*v1.Pod) *statefulSetIterator {
pi := &statefulSetIterator{
ps: ps,
queue: NewPetQueue(ps, podList),
errs: []error{},
petCount: 0,
}
return pi
}
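// Editorial sketch, not part of the vendored source: a minimal consumer of the
// iterator. The first Spec.Replicas calls to Next produce syncPet control blocks
// (or nil if newPCB failed); once the spec is satisfied the remaining seeded pets
// drain out as deletePet events. For 2 replicas over 3 known pods this prints
// foo-0 and foo-1 as sync and foo-2 as delete. Assumes the fakes.go fixtures.
func exampleIterateStatefulSet() {
	ps := newStatefulSet(2)
	pi := NewStatefulSetIterator(ps, newPodList(ps, 3))
	for pi.Next() {
		if pet := pi.Value(); pet != nil {
			fmt.Printf("%v -> %v\n", pet.pod.Name, pet.event)
		}
	}
}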
// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
type PodsByCreationTimestamp []*v1.Pod
func (o PodsByCreationTimestamp) Len() int { return len(o) }
func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o PodsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

vendor/k8s.io/kubernetes/pkg/controller/petset/iterator_test.go generated vendored Normal file

@ -0,0 +1,150 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestPetQueueCreates(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
q := NewPetQueue(ps, []*v1.Pod{})
for i := 0; i < replicas; i++ {
pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
q.enqueue(pet)
p := q.dequeue()
if p.event != syncPet {
t.Errorf("Failed to retrieve sync event from queue")
}
}
if q.dequeue() != nil {
t.Errorf("Expected no pods")
}
}
func TestPetQueueScaleDown(t *testing.T) {
replicas := 1
ps := newStatefulSet(replicas)
// knownPods are the pods in the system
knownPods := newPodList(ps, 3)
q := NewPetQueue(ps, knownPods)
// The iterator will insert a single replica, the enqueue
// mimics that behavior.
pet, _ := newPCB(fmt.Sprintf("%v", 0), ps)
q.enqueue(pet)
deletes := sets.NewString(fmt.Sprintf("%v-1", ps.Name), fmt.Sprintf("%v-2", ps.Name))
syncs := sets.NewString(fmt.Sprintf("%v-0", ps.Name))
// Confirm that 2 known pods are deleted
for i := 0; i < 3; i++ {
p := q.dequeue()
switch p.event {
case syncPet:
if !syncs.Has(p.pod.Name) {
t.Errorf("Unexpected sync %v expecting %+v", p.pod.Name, syncs)
}
case deletePet:
if !deletes.Has(p.pod.Name) {
t.Errorf("Unexpected deletes %v expecting %+v", p.pod.Name, deletes)
}
}
}
if q.dequeue() != nil {
t.Errorf("Expected no pods")
}
}
func TestPetQueueScaleUp(t *testing.T) {
replicas := 5
ps := newStatefulSet(replicas)
// knownPods are pods in the system
knownPods := newPodList(ps, 2)
q := NewPetQueue(ps, knownPods)
for i := 0; i < 5; i++ {
pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
q.enqueue(pet)
}
for i := 4; i >= 0; i-- {
pet := q.dequeue()
expectedName := fmt.Sprintf("%v-%d", ps.Name, i)
if pet.event != syncPet || pet.pod.Name != expectedName {
t.Errorf("Unexpected pod %+v, expected %v", pet.pod.Name, expectedName)
}
}
}
func TestStatefulSetIteratorRelist(t *testing.T) {
replicas := 5
ps := newStatefulSet(replicas)
// knownPods are pods in the system
knownPods := newPodList(ps, 5)
for i := range knownPods {
knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i)
knownPods[i].Status.Phase = v1.PodRunning
}
pi := NewStatefulSetIterator(ps, knownPods)
// A simple resync should not change identity of pods in the system
i := 0
for pi.Next() {
p := pi.Value()
if identityHash(ps, p.pod) != identityHash(ps, knownPods[i]) {
t.Errorf("Got unexpected identity hash from iterator.")
}
if p.event != syncPet {
t.Errorf("Got unexpected sync event for %v: %v", p.pod.Name, p.event)
}
i++
}
if i != 5 {
t.Errorf("Unexpected iterations %v, this probably means too many/few pods", i)
}
// Scale to 0 should delete all pods in system
*(ps.Spec.Replicas) = 0
pi = NewStatefulSetIterator(ps, knownPods)
i = 0
for pi.Next() {
p := pi.Value()
if p.event != deletePet {
t.Errorf("Got unexpected sync event for %v: %v", p.pod.Name, p.event)
}
i++
}
if i != 5 {
t.Errorf("Unexpected iterations %v, this probably means too many/few pods", i)
}
// Relist with 0 replicas should no-op
pi = NewStatefulSetIterator(ps, []*v1.Pod{})
if pi.Next() != false {
t.Errorf("Unexpected iteration without any replicas or pods in system")
}
}

vendor/k8s.io/kubernetes/pkg/controller/petset/pet.go generated vendored Normal file

@ -0,0 +1,326 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"strconv"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime"
"github.com/golang/glog"
)
// petLifeCycleEvent is used to communicate high level actions the controller
// needs to take on a given pet. It's recorded in the pcb. The recognized values
// are listed below.
type petLifeCycleEvent string
const (
syncPet petLifeCycleEvent = "sync"
deletePet petLifeCycleEvent = "delete"
// updateRetries is the number of Get/Update cycles we perform when an
// update fails.
updateRetries = 3
// StatefulSetInitAnnotation is an annotation which when set, indicates that the
// pet has finished initializing itself.
// TODO: Replace this with init container status.
StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized"
)
// pcb is the control block used to transmit all updates about a single pet.
// It serves as the manifest for a single pet. Users must populate the pod
// and parent fields to pass it around safely.
type pcb struct {
// pod is the desired pet pod.
pod *v1.Pod
// pvcs is a list of desired persistent volume claims for the pet pod.
pvcs []v1.PersistentVolumeClaim
// event is the lifecycle event associated with this update.
event petLifeCycleEvent
// id is the identity index of this pet.
id string
// parent is a pointer to the parent statefulset.
parent *apps.StatefulSet
}
// pvcClient is a client for managing persistent volume claims.
type pvcClient interface {
// DeletePVCs deletes the pvcs in the given pcb.
DeletePVCs(*pcb) error
// SyncPVCs creates/updates pvcs in the given pcb.
SyncPVCs(*pcb) error
}
// petSyncer syncs a single pet.
type petSyncer struct {
petClient
// blockingPet is an unhealthy pet, from this iteration or a previous one,
// that is either not yet Running or currently being Deleted, and therefore
// blocks further creates/deletions.
blockingPet *pcb
}
// errUnhealthyPet is returned when we either know for sure a pet is unhealthy,
// or don't know its state but assume it is unhealthy. It's used as a signal to the caller for further operations like updating status.replicas.
// This is not a fatal error.
type errUnhealthyPet string
func (e errUnhealthyPet) Error() string {
return string(e)
}
// Sync syncs the given pet.
func (p *petSyncer) Sync(pet *pcb) error {
if pet == nil {
return nil
}
realPet, exists, err := p.Get(pet)
if err != nil {
return err
}
// There is no constraint except quota on the number of pvcs created.
// This is done per pet so we get a working cluster ASAP, even if user
// runs out of quota.
if err := p.SyncPVCs(pet); err != nil {
return err
}
// If the pet failed, we need to remove the old pod to keep the naming consistent.
if exists && realPet.pod.Status.Phase == v1.PodFailed {
glog.V(2).Infof("Deleting evicted pod %v/%v", realPet.pod.Namespace, realPet.pod.Name)
if err := p.petClient.Delete(realPet); err != nil {
return err
}
} else if exists {
if !p.isHealthy(realPet.pod) {
glog.V(4).Infof("StatefulSet %v waiting on unhealthy pet %v", pet.parent.Name, realPet.pod.Name)
}
return p.Update(realPet, pet)
}
if p.blockingPet != nil {
message := errUnhealthyPet(fmt.Sprintf("Create of %v in StatefulSet %v blocked by unhealthy pet %v", pet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name))
glog.V(4).Infof(message.Error())
return message
}
// This is counted as a create, even if it fails. We can't skip indices
// because some pets might allocate a special role to earlier indices.
// The returned error will force a requeue.
// TODO: What's the desired behavior if pet-0 is deleted while pet-1 is
// not yet healthy? currently pet-0 will wait till pet-1 is healthy,
// this feels safer, but might lead to deadlock.
p.blockingPet = pet
if err := p.Create(pet); err != nil {
return err
}
return nil
}
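// Editorial sketch, not part of the vendored source: driving a petSyncer with the
// fake client from fakes.go. Syncing a pet that does not exist yet creates its
// claims and pod and records it as the blocking pet, so a different pet synced
// afterwards would be held back until this one becomes healthy.
func examplePetSyncerBlocking() {
	fc := newFakePetClient()
	syncer := &petSyncer{fc, nil}
	ps := newStatefulSet(1)
	if pet, err := newPCB("0", ps); err == nil {
		_ = syncer.Sync(pet) // fc.petsCreated is now 1 and syncer.blockingPet == pet
	}
}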
// Delete deletes the given pet, if no other pet in the statefulset is blocking a
// scale event.
func (p *petSyncer) Delete(pet *pcb) error {
if pet == nil {
return nil
}
realPet, exists, err := p.Get(pet)
if err != nil {
return err
}
if !exists {
return nil
}
if p.blockingPet != nil {
glog.V(4).Infof("Delete of %v in StatefulSet %v blocked by unhealthy pet %v", realPet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name)
return nil
}
// This is counted as a delete, even if it fails.
// The returned error will force a requeue.
p.blockingPet = realPet
if !p.isDying(realPet.pod) {
glog.V(2).Infof("StatefulSet %v deleting pet %v/%v", pet.parent.Name, pet.pod.Namespace, pet.pod.Name)
return p.petClient.Delete(pet)
}
glog.V(4).Infof("StatefulSet %v waiting on pet %v to die in %v", pet.parent.Name, realPet.pod.Name, realPet.pod.DeletionTimestamp)
return nil
}
// petClient is a client for managing pets.
type petClient interface {
pvcClient
petHealthChecker
Delete(*pcb) error
Get(*pcb) (*pcb, bool, error)
Create(*pcb) error
Update(*pcb, *pcb) error
}
// apiServerPetClient is a statefulset aware Kubernetes client.
type apiServerPetClient struct {
c clientset.Interface
recorder record.EventRecorder
petHealthChecker
}
// Get gets the pet in the pcb from the apiserver.
func (p *apiServerPetClient) Get(pet *pcb) (*pcb, bool, error) {
ns := pet.parent.Namespace
pod, err := p.c.Core().Pods(ns).Get(pet.pod.Name)
if errors.IsNotFound(err) {
return nil, false, nil
}
if err != nil {
return nil, false, err
}
realPet := *pet
realPet.pod = pod
return &realPet, true, nil
}
// Delete deletes the pet in the pcb from the apiserver.
func (p *apiServerPetClient) Delete(pet *pcb) error {
err := p.c.Core().Pods(pet.parent.Namespace).Delete(pet.pod.Name, nil)
if errors.IsNotFound(err) {
err = nil
}
p.event(pet.parent, "Delete", fmt.Sprintf("pet: %v", pet.pod.Name), err)
return err
}
// Create creates the pet in the pcb.
func (p *apiServerPetClient) Create(pet *pcb) error {
_, err := p.c.Core().Pods(pet.parent.Namespace).Create(pet.pod)
p.event(pet.parent, "Create", fmt.Sprintf("pet: %v", pet.pod.Name), err)
return err
}
// Update updates the pet in the 'pet' pcb to match the pet in the 'expectedPet' pcb.
// If the pod object of the pet being updated has changed on the server side, we
// fetch the current value and reset the pet identity before retrying.
func (p *apiServerPetClient) Update(pet *pcb, expectedPet *pcb) (updateErr error) {
pc := p.c.Core().Pods(pet.parent.Namespace)
for i := 0; ; i++ {
updatePod, needsUpdate, err := copyPetID(pet, expectedPet)
if err != nil || !needsUpdate {
return err
}
glog.V(4).Infof("Resetting pet %v/%v to match StatefulSet %v spec", pet.pod.Namespace, pet.pod.Name, pet.parent.Name)
_, updateErr = pc.Update(&updatePod)
if updateErr == nil || i >= updateRetries {
return updateErr
}
getPod, getErr := pc.Get(updatePod.Name)
if getErr != nil {
return getErr
}
pet.pod = getPod
}
}
// DeletePVCs should delete PVCs, when implemented.
func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
// TODO: Implement this when we delete pvcs.
return nil
}
func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, error) {
pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
return pvc, err
}
func (p *apiServerPetClient) createPVC(pvc *v1.PersistentVolumeClaim) error {
_, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
return err
}
// SyncPVCs syncs pvcs in the given pcb.
func (p *apiServerPetClient) SyncPVCs(pet *pcb) error {
errmsg := ""
// Create new claims.
for i, pvc := range pet.pvcs {
_, err := p.getPVC(pvc.Name, pet.parent.Namespace)
if err != nil {
if errors.IsNotFound(err) {
var err error
if err = p.createPVC(&pet.pvcs[i]); err != nil {
errmsg += fmt.Sprintf("Failed to create %v: %v", pvc.Name, err)
}
p.event(pet.parent, "Create", fmt.Sprintf("pvc: %v", pvc.Name), err)
} else {
errmsg += fmt.Sprintf("Error trying to get pvc %v, %v.", pvc.Name, err)
}
}
// TODO: Check resource requirements and accessmodes, update if necessary
}
if len(errmsg) != 0 {
return fmt.Errorf("%v", errmsg)
}
return nil
}
// event formats an event for the given runtime object.
func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) {
if err != nil {
p.recorder.Eventf(obj, v1.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
} else {
p.recorder.Eventf(obj, v1.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
}
}
// petHealthChecker is an interface to check pet health. It makes a boolean
// decision based on the given pod.
type petHealthChecker interface {
isHealthy(*v1.Pod) bool
isDying(*v1.Pod) bool
}
// defaultPetHealthChecker does basic health checking.
// It doesn't update, probe or get the pod.
type defaultPetHealthChecker struct{}
// isHealthy returns true if the pod is ready & running. If the pod has the
// "pod.alpha.kubernetes.io/initialized" annotation set to "false", readiness is
// overridden and the pod is reported as unhealthy.
func (d *defaultPetHealthChecker) isHealthy(pod *v1.Pod) bool {
if pod == nil || pod.Status.Phase != v1.PodRunning {
return false
}
podReady := v1.IsPodReady(pod)
// User may have specified a pod readiness override through a debug annotation.
initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
if ok {
if initAnnotation, err := strconv.ParseBool(initialized); err != nil {
glog.V(4).Infof("Failed to parse %v annotation on pod %v: %v", StatefulSetInitAnnotation, pod.Name, err)
} else if !initAnnotation {
glog.V(4).Infof("StatefulSet pod %v waiting on annotation %v", pod.Name, StatefulSetInitAnnotation)
podReady = initAnnotation
}
}
return podReady
}
// isDying returns true if the pod has a non-nil deletion timestamp. Since the
// timestamp, once set, is never cleared, once this method returns true for a
// given pet it will never return false.
func (d *defaultPetHealthChecker) isDying(pod *v1.Pod) bool {
return pod != nil && pod.DeletionTimestamp != nil
}
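// Editorial sketch, not part of the vendored source: a Running and Ready pod is
// normally healthy, but setting the init annotation to "false" overrides readiness
// and the checker reports the pod as unhealthy until the annotation flips to "true".
func exampleInitAnnotationOverride() {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{StatefulSetInitAnnotation: "false"}},
		Status: v1.PodStatus{
			Phase:      v1.PodRunning,
			Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
		},
	}
	checker := &defaultPetHealthChecker{}
	fmt.Printf("healthy=%v\n", checker.isHealthy(pod)) // prints "healthy=false"
}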

vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set.go generated vendored Normal file

@ -0,0 +1,370 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"reflect"
"sort"
"time"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/errors"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
const (
// Time to sleep before polling to see if the pod cache has synced.
PodStoreSyncedPollPeriod = 100 * time.Millisecond
// number of retries for a status update.
statusUpdateRetries = 2
// period to relist statefulsets and verify pets
statefulSetResyncPeriod = 30 * time.Second
)
// StatefulSetController controls statefulsets.
type StatefulSetController struct {
kubeClient clientset.Interface
// newSyncer returns an interface capable of syncing a single pet.
// Abstracted out for testing.
newSyncer func(*pcb) *petSyncer
// podStore is a cache of watched pods.
podStore cache.StoreToPodLister
// podStoreSynced returns true if the pod store has synced at least once.
podStoreSynced func() bool
// Watches changes to all pods.
podController cache.ControllerInterface
// A store of StatefulSets, populated by the psController.
psStore cache.StoreToStatefulSetLister
// Watches changes to all StatefulSets.
psController *cache.Controller
// A store of the 1 unhealthy pet blocking progress for a given ps
blockingPetStore *unhealthyPetTracker
// Controllers that need to be synced.
queue workqueue.RateLimitingInterface
// syncHandler handles sync events for statefulsets.
// Abstracted as a func to allow injection for testing.
syncHandler func(psKey string) error
}
// NewStatefulSetController creates a new statefulset controller.
func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "statefulset"})
pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}
psc := &StatefulSetController{
kubeClient: kubeClient,
blockingPetStore: newUnHealthyPetTracker(pc),
newSyncer: func(blockingPet *pcb) *petSyncer {
return &petSyncer{pc, blockingPet}
},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"),
}
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
// lookup the statefulset and enqueue
AddFunc: psc.addPod,
// lookup current and old statefulset if labels changed
UpdateFunc: psc.updatePod,
// lookup statefulset accounting for deletion tombstones
DeleteFunc: psc.deletePod,
})
psc.podStore.Indexer = podInformer.GetIndexer()
psc.podController = podInformer.GetController()
psc.psStore.Store, psc.psController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options)
},
},
&apps.StatefulSet{},
statefulSetResyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: psc.enqueueStatefulSet,
UpdateFunc: func(old, cur interface{}) {
oldPS := old.(*apps.StatefulSet)
curPS := cur.(*apps.StatefulSet)
if oldPS.Status.Replicas != curPS.Status.Replicas {
glog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
}
psc.enqueueStatefulSet(cur)
},
DeleteFunc: psc.enqueueStatefulSet,
},
)
// TODO: Watch volumes
psc.podStoreSynced = psc.podController.HasSynced
psc.syncHandler = psc.Sync
return psc
}
// Run runs the statefulset controller.
func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting statefulset controller")
go psc.podController.Run(stopCh)
go psc.psController.Run(stopCh)
for i := 0; i < workers; i++ {
go wait.Until(psc.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down statefulset controller")
psc.queue.ShutDown()
}
// addPod adds the statefulset for the pod to the sync queue
func (psc *StatefulSetController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
ps := psc.getStatefulSetForPod(pod)
if ps == nil {
return
}
psc.enqueueStatefulSet(ps)
}
// updatePod adds the statefulset for the current and old pods to the sync queue.
// If the labels of the pod didn't change, this method enqueues a single statefulset.
func (psc *StatefulSetController) updatePod(old, cur interface{}) {
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
return
}
ps := psc.getStatefulSetForPod(curPod)
if ps == nil {
return
}
psc.enqueueStatefulSet(ps)
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
if oldPS := psc.getStatefulSetForPod(oldPod); oldPS != nil {
psc.enqueueStatefulSet(oldPS)
}
}
}
// deletePod enqueues the statefulset for the pod accounting for deletion tombstones.
func (psc *StatefulSetController) deletePod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new StatefulSet will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("couldn't get object from tombstone %+v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("tombstone contained object that is not a pod %+v", obj)
return
}
}
glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller())
if ps := psc.getStatefulSetForPod(pod); ps != nil {
psc.enqueueStatefulSet(ps)
}
}
// getPodsForStatefulSet returns the pods that match the selector of the given statefulset.
func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*v1.Pod, error) {
// TODO: Do we want the statefulset to fight with RCs? check parent statefulset annotation, or name prefix?
sel, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
if err != nil {
return []*v1.Pod{}, err
}
pods, err := psc.podStore.Pods(ps.Namespace).List(sel)
if err != nil {
return []*v1.Pod{}, err
}
// TODO: Do we need to copy?
result := make([]*v1.Pod, 0, len(pods))
for i := range pods {
result = append(result, &(*pods[i]))
}
return result, nil
}
// getStatefulSetForPod returns the StatefulSet managing the given pod.
func (psc *StatefulSetController) getStatefulSetForPod(pod *v1.Pod) *apps.StatefulSet {
ps, err := psc.psStore.GetPodStatefulSets(pod)
if err != nil {
glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name)
return nil
}
// Resolve an overlapping statefulset tie by creation timestamp.
// Let's hope users don't create overlapping statefulsets.
if len(ps) > 1 {
glog.Errorf("user error! more than one StatefulSet is selecting pods with labels: %+v", pod.Labels)
sort.Sort(overlappingStatefulSets(ps))
}
return &ps[0]
}
// enqueueStatefulSet enqueues the given statefulset in the work queue.
func (psc *StatefulSetController) enqueueStatefulSet(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Cound't get key for object %+v: %v", obj, err)
return
}
psc.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (psc *StatefulSetController) worker() {
for {
func() {
key, quit := psc.queue.Get()
if quit {
return
}
defer psc.queue.Done(key)
if err := psc.syncHandler(key.(string)); err != nil {
glog.Errorf("Error syncing StatefulSet %v, requeuing: %v", key.(string), err)
psc.queue.AddRateLimited(key)
} else {
psc.queue.Forget(key)
}
}()
}
}
// Sync syncs the given statefulset.
func (psc *StatefulSetController) Sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime))
}()
if !psc.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(PodStoreSyncedPollPeriod)
return fmt.Errorf("waiting for pods controller to sync")
}
obj, exists, err := psc.psStore.Store.GetByKey(key)
if !exists {
if err = psc.blockingPetStore.store.Delete(key); err != nil {
return err
}
glog.Infof("StatefulSet has been deleted %v", key)
return nil
}
if err != nil {
glog.Errorf("Unable to retrieve StatefulSet %v from store: %v", key, err)
return err
}
ps := *obj.(*apps.StatefulSet)
petList, err := psc.getPodsForStatefulSet(&ps)
if err != nil {
return err
}
numPets, syncErr := psc.syncStatefulSet(&ps, petList)
if updateErr := updatePetCount(psc.kubeClient.Apps(), ps, numPets); updateErr != nil {
glog.Infof("Failed to update replica count for statefulset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, updateErr)
return errors.NewAggregate([]error{syncErr, updateErr})
}
return syncErr
}
// syncStatefulSet syncs a tuple of (statefulset, pets).
func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*v1.Pod) (int, error) {
glog.V(2).Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets))
it := NewStatefulSetIterator(ps, pets)
blockingPet, err := psc.blockingPetStore.Get(ps, pets)
if err != nil {
return 0, err
}
if blockingPet != nil {
glog.Infof("StatefulSet %v blocked from scaling on pod %v", ps.Name, blockingPet.pod.Name)
}
petManager := psc.newSyncer(blockingPet)
numPets := 0
for it.Next() {
pet := it.Value()
if pet == nil {
continue
}
switch pet.event {
case syncPet:
err = petManager.Sync(pet)
if err == nil {
numPets++
}
case deletePet:
err = petManager.Delete(pet)
}
switch err.(type) {
case errUnhealthyPet:
// We are not passing this error up, but we don't increment numPets if we encounter it,
// since numPets directly translates to statefulset.status.replicas
continue
case nil:
continue
default:
it.errs = append(it.errs, err)
}
}
if err := psc.blockingPetStore.Add(petManager.blockingPet); err != nil {
it.errs = append(it.errs, err)
}
// TODO: GC pvcs. We can't delete them per pet because of grace period, and
// in fact we *don't want to* till statefulset is stable to guarantee that bugs
// in the controller don't corrupt user data.
return numPets, errors.NewAggregate(it.errs)
}

vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set_test.go generated vendored Normal file

@ -0,0 +1,331 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"math/rand"
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
fakeinternal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/errors"
)
func newFakeStatefulSetController() (*StatefulSetController, *fakePetClient) {
fpc := newFakePetClient()
return &StatefulSetController{
kubeClient: nil,
blockingPetStore: newUnHealthyPetTracker(fpc),
podStoreSynced: func() bool { return true },
psStore: cache.StoreToStatefulSetLister{Store: cache.NewStore(controller.KeyFunc)},
podStore: cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
newSyncer: func(blockingPet *pcb) *petSyncer {
return &petSyncer{fpc, blockingPet}
},
}, fpc
}
func checkPets(ps *apps.StatefulSet, creates, deletes int, fc *fakePetClient, t *testing.T) {
if fc.petsCreated != creates || fc.petsDeleted != deletes {
t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes)
}
gotClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range fc.claims {
gotClaims[pvc.Name] = pvc
}
for i := range fc.pets {
expectedPet, _ := newPCB(fmt.Sprintf("%v", i), ps)
if identityHash(ps, fc.pets[i].pod) != identityHash(ps, expectedPet.pod) {
t.Errorf("Unexpected pod at index %d", i)
}
for _, pvc := range expectedPet.pvcs {
gotPVC, ok := gotClaims[pvc.Name]
if !ok {
t.Errorf("PVC %v not created for pod %v", pvc.Name, expectedPet.pod.Name)
}
if !reflect.DeepEqual(gotPVC.Spec, pvc.Spec) {
t.Errorf("got PVC %v differs from created pvc", pvc.Name)
}
}
}
}
func scaleStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient, scale int) error {
errs := []error{}
for i := 0; i < scale; i++ {
pl := fc.getPodList()
if len(pl) != i {
t.Errorf("Unexpected number of pods, expected %d found %d", i, len(pl))
}
if _, syncErr := psc.syncStatefulSet(ps, pl); syncErr != nil {
errs = append(errs, syncErr)
}
fc.setHealthy(i)
checkPets(ps, i+1, 0, fc, t)
}
return errors.NewAggregate(errs)
}
func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) {
err := scaleStatefulSet(t, ps, psc, fc, int(*(ps.Spec.Replicas)))
if err != nil {
t.Errorf("Error scaleStatefulSet: %v", err)
}
}
func TestStatefulSetControllerCreates(t *testing.T) {
psc, fc := newFakeStatefulSetController()
replicas := 3
ps := newStatefulSet(replicas)
saturateStatefulSet(t, ps, psc, fc)
podList := fc.getPodList()
// Deleted pet gets recreated
fc.pets = fc.pets[:replicas-1]
if _, err := psc.syncStatefulSet(ps, podList); err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
checkPets(ps, replicas+1, 0, fc, t)
}
func TestStatefulSetControllerDeletes(t *testing.T) {
psc, fc := newFakeStatefulSetController()
replicas := 4
ps := newStatefulSet(replicas)
saturateStatefulSet(t, ps, psc, fc)
// Drain
errs := []error{}
*(ps.Spec.Replicas) = 0
knownPods := fc.getPodList()
for i := replicas - 1; i >= 0; i-- {
if len(fc.pets) != i+1 {
t.Errorf("Unexpected number of pods, expected %d found %d", i+1, len(fc.pets))
}
if _, syncErr := psc.syncStatefulSet(ps, knownPods); syncErr != nil {
errs = append(errs, syncErr)
}
}
if len(errs) != 0 {
t.Errorf("Error syncing StatefulSet: %v", errors.NewAggregate(errs))
}
checkPets(ps, replicas, replicas, fc, t)
}
func TestStatefulSetControllerRespectsTermination(t *testing.T) {
psc, fc := newFakeStatefulSetController()
replicas := 4
ps := newStatefulSet(replicas)
saturateStatefulSet(t, ps, psc, fc)
fc.setDeletionTimestamp(replicas - 1)
*(ps.Spec.Replicas) = 2
_, err := psc.syncStatefulSet(ps, fc.getPodList())
if err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
// Finding a pod with the deletion timestamp will pause all deletions.
knownPods := fc.getPodList()
if len(knownPods) != 4 {
t.Errorf("Pods deleted prematurely before deletion timestamp expired, len %d", len(knownPods))
}
fc.pets = fc.pets[:replicas-1]
_, err = psc.syncStatefulSet(ps, fc.getPodList())
if err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
checkPets(ps, replicas, 1, fc, t)
}
func TestStatefulSetControllerRespectsOrder(t *testing.T) {
psc, fc := newFakeStatefulSetController()
replicas := 4
ps := newStatefulSet(replicas)
saturateStatefulSet(t, ps, psc, fc)
errs := []error{}
*(ps.Spec.Replicas) = 0
// Shuffle known list and check that pets are deleted in reverse
knownPods := fc.getPodList()
for i := range knownPods {
j := rand.Intn(i + 1)
knownPods[i], knownPods[j] = knownPods[j], knownPods[i]
}
for i := 0; i < replicas; i++ {
if len(fc.pets) != replicas-i {
t.Errorf("Unexpected number of pods, expected %d found %d", i, len(fc.pets))
}
if _, syncErr := psc.syncStatefulSet(ps, knownPods); syncErr != nil {
errs = append(errs, syncErr)
}
checkPets(ps, replicas, i+1, fc, t)
}
if len(errs) != 0 {
t.Errorf("Error syncing StatefulSet: %v", errors.NewAggregate(errs))
}
}
func TestStatefulSetControllerBlocksScaling(t *testing.T) {
psc, fc := newFakeStatefulSetController()
replicas := 5
ps := newStatefulSet(replicas)
scaleStatefulSet(t, ps, psc, fc, 3)
// Create the 4th pet, then before flipping it to healthy, kill the first pet.
// There should only be 1 not-healthy pet at a time.
pl := fc.getPodList()
if _, err := psc.syncStatefulSet(ps, pl); err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
deletedPod := pl[0]
fc.deletePetAtIndex(0)
pl = fc.getPodList()
if _, err := psc.syncStatefulSet(ps, pl); err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
newPodList := fc.getPodList()
for _, p := range newPodList {
if p.Name == deletedPod.Name {
t.Errorf("Deleted pod was created while existing pod was unhealthy")
}
}
fc.setHealthy(len(newPodList) - 1)
if _, err := psc.syncStatefulSet(ps, pl); err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
}
found := false
for _, p := range fc.getPodList() {
if p.Name == deletedPod.Name {
found = true
break
}
}
if !found {
t.Errorf("Deleted pod was not created after existing pods became healthy")
}
}
func TestStatefulSetBlockingPetIsCleared(t *testing.T) {
psc, fc := newFakeStatefulSetController()
ps := newStatefulSet(3)
scaleStatefulSet(t, ps, psc, fc, 1)
if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil {
t.Errorf("Unexpected blocking pod %v, err %v", blocking, err)
}
// 1 not yet healthy pet
psc.syncStatefulSet(ps, fc.getPodList())
if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking == nil {
t.Errorf("Expected blocking pod %v, err %v", blocking, err)
}
// Deleting the statefulset should clear the blocking pet
if err := psc.psStore.Store.Delete(ps); err != nil {
t.Fatalf("Unable to delete pod %v from statefulset controller store.", ps.Name)
}
if err := psc.Sync(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil {
t.Errorf("Error during sync of deleted statefulset %v", err)
}
fc.pets = []*pcb{}
fc.petsCreated = 0
if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil {
t.Errorf("Unexpected blocking pod %v, err %v", blocking, err)
}
saturateStatefulSet(t, ps, psc, fc)
// Make sure we don't leak the final blocking pet in the store
psc.syncStatefulSet(ps, fc.getPodList())
if p, exists, err := psc.blockingPetStore.store.GetByKey(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil || exists {
t.Errorf("Unexpected blocking pod, err %v: %+v", err, p)
}
}
func TestSyncStatefulSetBlockedPet(t *testing.T) {
psc, fc := newFakeStatefulSetController()
ps := newStatefulSet(3)
i, _ := psc.syncStatefulSet(ps, fc.getPodList())
if i != len(fc.getPodList()) {
t.Errorf("syncStatefulSet should return actual amount of pods")
}
}
type fakeClient struct {
fakeinternal.Clientset
statefulsetClient *fakeStatefulSetClient
}
func (c *fakeClient) Apps() v1beta1.AppsV1beta1Interface {
return &fakeApps{c, &fake.FakeAppsV1beta1{}}
}
type fakeApps struct {
*fakeClient
*fake.FakeAppsV1beta1
}
func (c *fakeApps) StatefulSets(namespace string) v1beta1.StatefulSetInterface {
c.statefulsetClient.Namespace = namespace
return c.statefulsetClient
}
type fakeStatefulSetClient struct {
*fake.FakeStatefulSets
Namespace string
replicas int32
}
func (f *fakeStatefulSetClient) UpdateStatus(statefulset *apps.StatefulSet) (*apps.StatefulSet, error) {
f.replicas = statefulset.Status.Replicas
return statefulset, nil
}
func TestStatefulSetReplicaCount(t *testing.T) {
fpsc := &fakeStatefulSetClient{}
psc, _ := newFakeStatefulSetController()
psc.kubeClient = &fakeClient{
statefulsetClient: fpsc,
}
ps := newStatefulSet(3)
psKey := fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)
psc.psStore.Store.Add(ps)
if err := psc.Sync(psKey); err != nil {
t.Errorf("Error during sync of deleted statefulset %v", err)
}
if fpsc.replicas != 1 {
t.Errorf("Replicas count sent as status update for StatefulSet should be 1, is %d instead", fpsc.replicas)
}
}

View file

@@ -0,0 +1,157 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
)
// overlappingStatefulSets sorts a list of StatefulSets by creation timestamp, using their names as a tie breaker.
// Generally used to tie break between StatefulSets that have overlapping selectors.
type overlappingStatefulSets []apps.StatefulSet
func (o overlappingStatefulSets) Len() int { return len(o) }
func (o overlappingStatefulSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o overlappingStatefulSets) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
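// Illustrative usage sketch (not part of the original file): callers that find
// several StatefulSets with overlapping selectors would sort them and pick the
// oldest, e.g.
//
//	sets := overlappingStatefulSets(candidates) // candidates []apps.StatefulSet is a hypothetical slice
//	sort.Sort(sets)                             // ascending creation time: oldest first, names break ties
//	oldest := sets[0]
//
// The "sort" package would need to be imported wherever this is used.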
// updatePetCount attempts to update the Status.Replicas of the given StatefulSet, with a single GET/PUT retry.
func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulSet, numPets int) (updateErr error) {
if ps.Status.Replicas == int32(numPets) || psClient == nil {
return nil
}
var getErr error
for i, ps := 0, &ps; ; i++ {
glog.V(4).Infof("Updating replica count for StatefulSet: %s/%s, replicas %d->%d (need %d)", ps.Namespace, ps.Name, ps.Status.Replicas, numPets, *(ps.Spec.Replicas))
ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)}
_, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps)
if updateErr == nil || i >= statusUpdateRetries {
return updateErr
}
if ps, getErr = psClient.StatefulSets(ps.Namespace).Get(ps.Name); getErr != nil {
return getErr
}
}
}
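// Hedged usage sketch (an assumption, not code from the original file): a syncer
// that has just counted its running pets might call
//
//	if err := updatePetCount(appsClient, *ps, numRunningPets); err != nil {
//		return err // updatePetCount has already retried internally
//	}
//
// where appsClient (any StatefulSetsGetter) and numRunningPets are hypothetical
// names used only for this example.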
// unhealthyPetTracker tracks unhealthy pets for statefulsets.
type unhealthyPetTracker struct {
pc petClient
store cache.Store
storeLock sync.Mutex
}
// Get returns a previously recorded blocking pet for the given statefulset.
func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*v1.Pod) (*pcb, error) {
u.storeLock.Lock()
defer u.storeLock.Unlock()
// We "Get" by key but "Add" by object because the store interface doesn't
// allow us to Get/Add a related obj (e.g. statefulset: blocking pet).
key, err := controller.KeyFunc(ps)
if err != nil {
return nil, err
}
obj, exists, err := u.store.GetByKey(key)
if err != nil {
return nil, err
}
hc := defaultPetHealthChecker{}
// There's no unhealthy pet blocking a scale event, but this might be
// a controller manager restart. If it is, knownPets can be trusted.
if !exists {
for _, p := range knownPets {
if hc.isHealthy(p) && !hc.isDying(p) {
glog.V(4).Infof("Ignoring healthy pod %v for StatefulSet %v", p.Name, ps.Name)
continue
}
glog.V(4).Infof("No recorded blocking pod, but found unhealthy pod %v for StatefulSet %v", p.Name, ps.Name)
return &pcb{pod: p, parent: ps}, nil
}
return nil, nil
}
// This is a pet that's blocking further creates/deletes of a statefulset. If it
// disappears, it's no longer blocking. If it exists, it continues to block
// till it turns healthy or disappears.
bp := obj.(*pcb)
blockingPet, exists, err := u.pc.Get(bp)
if err != nil {
return nil, err
}
if !exists {
glog.V(4).Infof("Clearing blocking pod %v for StatefulSet %v because it's been deleted", bp.pod.Name, ps.Name)
return nil, nil
}
blockingPetPod := blockingPet.pod
if hc.isHealthy(blockingPetPod) && !hc.isDying(blockingPetPod) {
glog.V(4).Infof("Clearing blocking pod %v for StatefulSet %v because it's healthy", bp.pod.Name, ps.Name)
u.store.Delete(blockingPet)
blockingPet = nil
}
return blockingPet, nil
}
// Add records the given pet as a blocking pet.
func (u *unhealthyPetTracker) Add(blockingPet *pcb) error {
u.storeLock.Lock()
defer u.storeLock.Unlock()
if blockingPet == nil {
return nil
}
glog.V(4).Infof("Adding blocking pod %v for StatefulSet %v", blockingPet.pod.Name, blockingPet.parent.Name)
return u.store.Add(blockingPet)
}
// newUnHealthyPetTracker tracks unhealthy pets that block progress of statefulsets.
func newUnHealthyPetTracker(pc petClient) *unhealthyPetTracker {
return &unhealthyPetTracker{pc: pc, store: cache.NewStore(pcbKeyFunc)}
}
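// Sketch of the intended Get/Add flow (pieced together from the methods above as
// an assumption, not code from the original file):
//
//	tracker := newUnHealthyPetTracker(petClient)  // petClient is some petClient implementation
//	blocking, err := tracker.Get(ps, knownPods)   // ps *apps.StatefulSet, knownPods []*v1.Pod
//	if err == nil && blocking != nil {
//		// creates/deletes stay paused until this pet is healthy or gone;
//		// record it so the next sync can find it again under the parent's key.
//		_ = tracker.Add(blocking)
//	}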
// pcbKeyFunc computes the key for a given pcb.
// If it's given a key, it simply returns it.
func pcbKeyFunc(obj interface{}) (string, error) {
if key, ok := obj.(string); ok {
return key, nil
}
p, ok := obj.(*pcb)
if !ok {
return "", fmt.Errorf("not a valid pod control block %#v", p)
}
if p.parent == nil {
return "", fmt.Errorf("cannot compute pod control block key without parent pointer %#v", p)
}
return controller.KeyFunc(p.parent)
}
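// Illustrative example (an assumption, not part of the original file): for a pcb
// whose parent StatefulSet lives in namespace "default" and is named "web",
//
//	key, _ := pcbKeyFunc(blockingPet) // key == "default/web", i.e. the parent's key
//
// so the tracker's store can hold at most one blocking pet per StatefulSet.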

View file

@@ -0,0 +1,177 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package petset
import (
"fmt"
"net/http/httptest"
"testing"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
)
func newPetClient(client *clientset.Clientset) *apiServerPetClient {
return &apiServerPetClient{
c: client,
}
}
func makeTwoDifferntPCB() (pcb1, pcb2 *pcb) {
userAdded := v1.Volume{
Name: "test",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
}
ps := newStatefulSet(2)
pcb1, _ = newPCB("1", ps)
pcb2, _ = newPCB("2", ps)
pcb2.pod.Spec.Volumes = append(pcb2.pod.Spec.Volumes, userAdded)
return pcb1, pcb2
}
func TestUpdatePetWithoutRetry(t *testing.T) {
pcb1, pcb2 := makeTwoDifferntPCB()
// invalid pet with a nil pod
invalidPcb := *pcb1
invalidPcb.pod = nil
testCases := []struct {
realPet *pcb
expectedPet *pcb
expectErr bool
requests int
}{
// case 0: error occurs, no need to update
{
realPet: pcb1,
expectedPet: &invalidPcb,
expectErr: true,
requests: 0,
},
// case 1: identical pet, no need to update
{
realPet: pcb1,
expectedPet: pcb1,
expectErr: false,
requests: 0,
},
// case 2: need to call update once
{
realPet: pcb1,
expectedPet: pcb2,
expectErr: false,
requests: 1,
},
}
for k, tc := range testCases {
body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(body),
}
testServer := httptest.NewServer(&fakeHandler)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client)
err := petClient.Update(tc.realPet, tc.expectedPet)
if tc.expectErr != (err != nil) {
t.Errorf("case %d: expect error(%v), got err: %v", k, tc.expectErr, err)
}
fakeHandler.ValidateRequestCount(t, tc.requests)
testServer.Close()
}
}
func TestUpdatePetWithFailure(t *testing.T) {
fakeHandler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "{}",
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client)
pcb1, pcb2 := makeTwoDifferntPCB()
if err := petClient.Update(pcb1, pcb2); err == nil {
t.Errorf("expect error, got nil")
}
// 1 Update and 1 GET, both of which fail
fakeHandler.ValidateRequestCount(t, 2)
}
func TestUpdatePetRetrySucceed(t *testing.T) {
pcb1, pcb2 := makeTwoDifferntPCB()
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, pcb2.pod, nil
})
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("Fake error")
})
petClient := apiServerPetClient{
c: fakeClient,
}
if err := petClient.Update(pcb1, pcb2); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fakeClient.Actions()
if len(actions) != 2 {
t.Errorf("Expect 2 actions, got %d actions", len(actions))
}
for i := 0; i < len(actions); i++ {
a := actions[i]
if a.GetResource().Resource != "pods" {
t.Errorf("Unexpected action %+v", a)
continue
}
switch action := a.(type) {
case core.GetAction:
if i%2 == 0 {
t.Errorf("Unexpected Get action")
}
// Make sure the get is for the right pod
if action.GetName() != pcb2.pod.Name {
t.Errorf("Expected get pod %v, got %q instead", pcb2.pod.Name, action.GetName())
}
case core.UpdateAction:
if i%2 == 1 {
t.Errorf("Unexpected Update action")
}
default:
t.Errorf("Unexpected action %+v", a)
}
}
}

View file

@@ -0,0 +1,71 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"horizontal.go",
"replica_calculator.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/autoscaling/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"horizontal_test.go",
"replica_calculator_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:github.com/stretchr/testify/require",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)

Some files were not shown because too many files have changed in this diff