forked from barak/tarpoon

Add glide.yaml and vendor deps

This commit is contained in:
Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

92
vendor/k8s.io/kubernetes/pkg/controller/node/BUILD generated vendored Normal file

@@ -0,0 +1,92 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"cidr_allocator.go",
"cidr_set.go",
"controller_utils.go",
"doc.go",
"metrics.go",
"nodecontroller.go",
"rate_limited_queue.go",
"test_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/prometheus/client_golang/prometheus",
],
)
go_test(
name = "go_default_test",
srcs = [
"cidr_allocator_test.go",
"cidr_set_test.go",
"nodecontroller_test.go",
"rate_limited_queue_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/diff:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/golang/glog",
],
)

266
vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go generated vendored Normal file

@@ -0,0 +1,266 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"errors"
"fmt"
"net"
"sync"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
)
// TODO: figure out the good setting for those constants.
const (
// controls how many NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
// podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update.
podCIDRUpdateRetry = 5
)
var errCIDRRangeNoCIDRsRemaining = errors.New("CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range")
type nodeAndCIDR struct {
cidr *net.IPNet
nodeName string
}
// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
AllocateOrOccupyCIDR(node *v1.Node) error
ReleaseCIDR(node *v1.Node) error
}
type rangeAllocator struct {
client clientset.Interface
cidrs *cidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// Channel that is used to pass Nodes with newly assigned CIDRs to the background workers.
// This increases the throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
sync.Mutex
nodesInProcessing sets.String
}
// NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDR for node
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
ra := &rangeAllocator{
client: client,
cidrs: newCIDRSet(clusterCIDR, subNetMaskSize),
clusterCIDR: clusterCIDR,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}
if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
return nil, err
}
}
}
for i := 0; i < cidrUpdateWorkers; i++ {
go func(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-ra.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("NodeCIDRUpdateChannel read returned false.")
return
}
ra.updateCIDRAllocation(workItem)
case <-stopChan:
return
}
}
}(wait.NeverStop)
}
return ra, nil
}
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
r.Lock()
defer r.Unlock()
if r.nodesInProcessing.Has(nodeName) {
return false
}
r.nodesInProcessing.Insert(nodeName)
return true
}
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.Lock()
defer r.Unlock()
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
}
return nil
}
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid CIDR
// if it doesn't currently have one, or marks the CIDR as used if the node already has one.
// WARNING: If you add any return paths to, or defer any more work from, this function,
// you must handle nodesInProcessing correctly.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)
}
podCIDR, err := r.cidrs.allocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
recordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
glog.V(10).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
}
return nil
}
// ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}
// Marks all CIDRs with subNetMaskSize that belong to serviceCIDR as used,
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster CIDR. It is the case if either
// clusterCIDR contains serviceCIDR with clusterCIDR's Mask applied (this means that clusterCIDR contains serviceCIDR)
// or vice versa (which means that serviceCIDR contains clusterCIDR).
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
if err := r.cidrs.occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = r.client.Core().Nodes().Get(data.nodeName)
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has allocated CIDR %v. Releasing assigned one if different.", node.Name, node.Spec.PodCIDR)
if node.Spec.PodCIDR != data.cidr.String() {
if err := r.cidrs.release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", data.cidr.String())
}
}
return nil
}
node.Spec.PodCIDR = data.cidr.String()
if _, err := r.client.Core().Nodes().Update(node); err != nil {
glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err)
} else {
break
}
}
if err != nil {
recordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err)
if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr)
}
}
}
return err
}
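
For orientation, a minimal sketch of how this allocator might be driven, assuming it sits in package node next to the code above and reuses the fake clientset from the vendored tests; the function name exampleAllocate and the CIDR values are illustrative, not part of this commit.

package node

import (
	"net"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
)

// exampleAllocate builds a CIDRAllocator over 10.10.0.0/16 that hands out
// /24 ranges, then asks it to assign a CIDR to a single node.
func exampleAllocate() error {
	_, clusterCIDR, _ := net.ParseCIDR("10.10.0.0/16")

	// No service CIDR to filter out and no pre-existing node list.
	allocator, err := NewCIDRRangeAllocator(fake.NewSimpleClientset(), clusterCIDR, nil, 24, nil)
	if err != nil {
		return err
	}

	node := &v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node0"}}
	// Picks the first free /24 (10.10.0.0/24) and queues the Node update
	// for the background workers started in NewCIDRRangeAllocator.
	return allocator.AllocateOrOccupyCIDR(node)
}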

396
vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go generated vendored Normal file

@@ -0,0 +1,396 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"net"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
nodePollInterval = 100 * time.Millisecond
)
func waitForUpdatedNodeWithTimeout(nodeHandler *FakeNodeHandler, number int, timeout time.Duration) error {
return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
if len(nodeHandler.getUpdatedNodesCopy()) >= number {
return true, nil
}
return false, nil
})
}
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDR: "127.123.234.0/30",
},
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
// it should return first /30 CIDR after service range
expectedAllocatedCIDR: "127.123.234.64/30",
},
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
expectedAllocatedCIDR: "127.123.234.76/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
if err = rangeAllocator.cidrs.occupy(cidr); err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDR {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDR, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
err = rangeAllocator.cidrs.occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err == nil {
t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates, so just sleep for some time
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
}
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
if updatedNode.Spec.PodCIDR != "" {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
}
}
if len(seenCIDRs) != 0 {
t.Errorf("%v: Seen assigned CIDRs when not expected: %v",
tc.description, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestReleaseCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}{
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "",
cidrsToRelease: []string{"127.123.234.4/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.4/30",
},
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDRFirstRound: "127.123.234.0/30",
cidrsToRelease: []string{"127.123.234.0/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}) {
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
err = rangeAllocator.cidrs.occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0])
if tc.expectedAllocatedCIDRFirstRound != "" {
if err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
} else {
if err == nil {
t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates here
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
}
}
for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
}
nodeToRelease.Spec.PodCIDR = cidrToRelease
err = allocator.ReleaseCIDR(&nodeToRelease)
if err != nil {
t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
}
}
if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDRSecondRound {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDRSecondRound, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}

150
vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set.go generated vendored Normal file

@@ -0,0 +1,150 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"encoding/binary"
"fmt"
"math/big"
"net"
"sync"
)
type cidrSet struct {
sync.Mutex
clusterCIDR *net.IPNet
clusterIP net.IP
clusterMaskSize int
maxCIDRs int
nextCandidate int
used big.Int
subNetMaskSize int
}
func newCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) *cidrSet {
clusterMask := clusterCIDR.Mask
clusterMaskSize, _ := clusterMask.Size()
maxCIDRs := 1 << uint32(subNetMaskSize-clusterMaskSize)
return &cidrSet{
clusterCIDR: clusterCIDR,
clusterIP: clusterCIDR.IP.To4(),
clusterMaskSize: clusterMaskSize,
maxCIDRs: maxCIDRs,
subNetMaskSize: subNetMaskSize,
}
}
func (s *cidrSet) allocateNext() (*net.IPNet, error) {
s.Lock()
defer s.Unlock()
nextUnused := -1
for i := 0; i < s.maxCIDRs; i++ {
candidate := (i + s.nextCandidate) % s.maxCIDRs
if s.used.Bit(candidate) == 0 {
nextUnused = candidate
break
}
}
if nextUnused == -1 {
return nil, errCIDRRangeNoCIDRsRemaining
}
s.nextCandidate = (nextUnused + 1) % s.maxCIDRs
s.used.SetBit(&s.used, nextUnused, 1)
j := uint32(nextUnused) << uint32(32-s.subNetMaskSize)
ipInt := (binary.BigEndian.Uint32(s.clusterIP)) | j
ip := make([]byte, 4)
binary.BigEndian.PutUint32(ip, ipInt)
return &net.IPNet{
IP: ip,
Mask: net.CIDRMask(s.subNetMaskSize, 32),
}, nil
}
func (s *cidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err error) {
begin, end = 0, s.maxCIDRs
cidrMask := cidr.Mask
maskSize, _ := cidrMask.Size()
if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {
return -1, -1, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.clusterCIDR)
}
if s.clusterMaskSize < maskSize {
subNetMask := net.CIDRMask(s.subNetMaskSize, 32)
begin, err = s.getIndexForCIDR(&net.IPNet{
IP: cidr.IP.To4().Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
ip := make([]byte, 4)
ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))
binary.BigEndian.PutUint32(ip, ipInt)
end, err = s.getIndexForCIDR(&net.IPNet{
IP: net.IP(ip).To4().Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
}
return begin, end, nil
}
func (s *cidrSet) release(cidr *net.IPNet) error {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 0)
}
return nil
}
func (s *cidrSet) occupy(cidr *net.IPNet) (err error) {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 1)
}
return nil
}
func (s *cidrSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {
cidrIndex := (binary.BigEndian.Uint32(s.clusterIP) ^ binary.BigEndian.Uint32(cidr.IP.To4())) >> uint32(32-s.subNetMaskSize)
if cidrIndex >= uint32(s.maxCIDRs) {
return 0, fmt.Errorf("CIDR: %v is out of the range of CIDR allocator", cidr)
}
return int(cidrIndex), nil
}
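
To make the bitmap allocator above concrete, a minimal in-package sketch; the function name exampleCIDRSet and the CIDR values are illustrative, not part of this commit.

package node

import "net"

// exampleCIDRSet carves /24 subnets out of 10.0.0.0/16 and exercises
// allocateNext, occupy and release on the cidrSet above.
func exampleCIDRSet() error {
	_, clusterCIDR, _ := net.ParseCIDR("10.0.0.0/16")
	set := newCIDRSet(clusterCIDR, 24) // room for 256 /24 CIDRs

	first, err := set.allocateNext() // 10.0.0.0/24
	if err != nil {
		return err
	}

	_, reserved, _ := net.ParseCIDR("10.0.5.0/24")
	if err := set.occupy(reserved); err != nil { // mark 10.0.5.0/24 as used
		return err
	}

	return set.release(first) // 10.0.0.0/24 becomes allocatable again
}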

336
vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set_test.go generated vendored Normal file

@@ -0,0 +1,336 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"math/big"
"net"
"reflect"
"testing"
"github.com/golang/glog"
)
func TestCIDRSetFullyAllocated(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/30")
a := newCIDRSet(clusterCIDR, 30)
p, err := a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p.String() != "127.123.234.0/30" {
t.Fatalf("unexpected allocated cidr: %s", p.String())
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
a.release(p)
p, err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p.String() != "127.123.234.0/30" {
t.Fatalf("unexpected allocated cidr: %s", p.String())
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
}
func TestCIDRSet_RandomishAllocation(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16")
a := newCIDRSet(clusterCIDR, 24)
// allocate all the CIDRs
var err error
cidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
cidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// release them all
for i := 0; i < 256; i++ {
a.release(cidrs[i])
}
// allocate the CIDRs again
rcidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
rcidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %d, %v", i, err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection")
}
}
func TestCIDRSet_AllocationOccupied(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16")
a := newCIDRSet(clusterCIDR, 24)
// allocate all the CIDRs
var err error
cidrs := make([]*net.IPNet, 256)
for i := 0; i < 256; i++ {
cidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// release them all
for i := 0; i < 256; i++ {
a.release(cidrs[i])
}
// occupy the last 128 CIDRs
for i := 128; i < 256; i++ {
a.occupy(cidrs[i])
}
// allocate the first 128 CIDRs again
rcidrs := make([]*net.IPNet, 128)
for i := 0; i < 128; i++ {
rcidrs[i], err = a.allocateNext()
if err != nil {
t.Fatalf("unexpected error: %d, %v", i, err)
}
}
_, err = a.allocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
// check Occupy() work properly
for i := 128; i < 256; i++ {
rcidrs = append(rcidrs, cidrs[i])
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection")
}
}
func TestGetBitforCIDR(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedBit int
expectErr bool
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedBit: 0,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.123.0.0/16",
expectedBit: 123,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.168.0.0/16",
expectedBit: 168,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.224.0.0/16",
expectedBit: 224,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.12.0/24",
expectedBit: 12,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.151.0/24",
expectedBit: 151,
expectErr: false,
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "127.168.224.0/24",
expectErr: true,
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
cs := newCIDRSet(clusterCIDR, tc.subNetMaskSize)
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, err := cs.getIndexForCIDR(subnetCIDR)
if err == nil && tc.expectErr {
glog.Errorf("expected error but got null")
continue
}
if err != nil && !tc.expectErr {
glog.Errorf("unexpected error: %v", err)
continue
}
if got != tc.expectedBit {
glog.Errorf("expected %v, but got %v", tc.expectedBit, got)
}
}
}
func TestOccupy(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedUsedBegin int
expectedUsedEnd int
expectErr bool
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/8",
expectedUsedBegin: 0,
expectedUsedEnd: 256,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/2",
expectedUsedBegin: 0,
expectedUsedEnd: 256,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 0,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 32,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 65535,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 256,
expectedUsedEnd: 257,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 15,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 128,
expectedUsedEnd: 128,
expectErr: false,
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 18,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 1024,
expectedUsedEnd: 1031,
expectErr: false,
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
cs := newCIDRSet(clusterCIDR, tc.subNetMaskSize)
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
err = cs.occupy(subnetCIDR)
if err == nil && tc.expectErr {
t.Errorf("expected error but got none")
continue
}
if err != nil && !tc.expectErr {
t.Errorf("unexpected error: %v", err)
continue
}
expectedUsed := big.Int{}
for i := tc.expectedUsedBegin; i <= tc.expectedUsedEnd; i++ {
expectedUsed.SetBit(&expectedUsed, i, 1)
}
if expectedUsed.Cmp(&cs.used) != 0 {
t.Errorf("error")
}
}
}

290
vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go generated vendored Normal file

@@ -0,0 +1,290 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/node"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
)
const (
// Number of Nodes that need to be in the cluster for it to be treated as "large"
LargeClusterThreshold = 20
)
// deletePods deletes, via the master, all pods running on the given node, and returns true
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
var updateErrList []error
if err != nil {
return remaining, err
}
if len(pods.Items) > 0 {
recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
}
for _, pod := range pods.Items {
// Defensive check, also needed for tests.
if pod.Spec.NodeName != nodeName {
continue
}
// Set reason and message in the pod object.
if _, err = setPodTerminationReason(kubeClient, &pod, nodeName); err != nil {
if errors.IsConflict(err) {
updateErrList = append(updateErrList,
fmt.Errorf("update status failed for pod %q: %v", format.Pod(&pod), err))
continue
}
}
// if the pod has already been marked for deletion, we still return true that there are remaining pods.
if pod.DeletionGracePeriodSeconds != nil {
remaining = true
continue
}
// if the pod is managed by a daemonset, ignore it
_, err := daemonStore.GetPodDaemonSets(&pod)
if err == nil { // No error means at least one daemonset was found
continue
}
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err
}
remaining = true
}
if len(updateErrList) > 0 {
return false, utilerrors.NewAggregate(updateErrList)
}
return remaining, nil
}
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == node.NodeUnreachablePodReason {
return pod, nil
}
pod.Status.Reason = node.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
return nil, err
}
return updatedPod, nil
}
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
}
return err
}
// forcefullyDeleteNode immediately deletes the node. The pods on the node are cleaned
// up by the podGC.
func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error {
if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil {
return fmt.Errorf("unable to delete node %q: %v", nodeName, err)
}
return nil
}
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
// that should not be gracefully terminated.
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
return
}
}
// consider only terminating pods
if pod.DeletionTimestamp == nil {
return
}
nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName)
if err != nil {
// this can only happen if the Store.KeyFunc has a problem creating
// a key for the pod. If it happens once, it will happen again so
// don't bother requeuing the pod.
utilruntime.HandleError(err)
return
}
// if there is no such node, do nothing and let the podGC clean it up.
if !found {
return
}
// delete terminating pods that have been scheduled on
// nodes that do not support graceful termination
// TODO(mikedanese): this can be removed when we no longer
// guarantee backwards compatibility of master API to kubelets with
// versions less than 1.1.0
node := nodeObj.(*v1.Node)
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
utilruntime.HandleError(nc.forcefullyDeletePod(pod))
return
}
if gracefulDeletionVersion.GT(v) {
utilruntime.HandleError(nc.forcefullyDeletePod(pod))
return
}
}
// markAllPodsNotReady updates the ready status of all pods running on the given node from master,
// returning nil on success.
func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
// Don't set pods to NotReady if the kubelet is running a version that
// doesn't understand how to correct readiness.
// TODO: Remove this check when we no longer guarantee backward compatibility
// with node versions < 1.2.0.
if nodeRunningOutdatedKubelet(node) {
return nil
}
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
errMsg := []string{}
for _, pod := range pods.Items {
// Defensive check, also needed for tests.
if pod.Spec.NodeName != nodeName {
continue
}
for i, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))
}
break
}
}
}
if len(errMsg) == 0 {
return nil
}
return fmt.Errorf("%v", strings.Join(errMsg, "; "))
}
// nodeRunningOutdatedKubelet returns true if the kubeletVersion reported
// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
// Older versions were inflexible and modifying pod.Status directly through
// the apiserver would result in unexpected outcomes.
func nodeRunningOutdatedKubelet(node *v1.Node) bool {
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
return true
}
if podStatusReconciliationVersion.GT(v) {
glog.Infof("Node %v running kubelet at (%v) which is less than the minimum version that allows nodecontroller to mark pods NotReady (%v).", node.Name, v, podStatusReconciliationVersion)
return true
}
return false
}
func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {
instances, ok := cloud.Instances()
if !ok {
return false, fmt.Errorf("%v", ErrCloudInstance)
}
if _, err := instances.ExternalID(nodeName); err != nil {
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, err
}
return true, nil
}
func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeUID),
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: node.UID,
Namespace: "",
}
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}
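
As a quick illustration of the two event helpers above, a minimal in-package sketch; the component name, reason and status strings are made up, and in the real controller the broadcaster also gets logging and API-server sinks attached.

package node

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// exampleRecordEvents wires up an event recorder the same way
// NewCIDRRangeAllocator does and emits both kinds of node events.
func exampleRecordEvents(node *v1.Node) {
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(v1.EventSource{Component: "nodecontroller"})

	recordNodeEvent(recorder, node.Name, string(node.UID), v1.EventTypeNormal, "ExampleReason", "example event")
	recordNodeStatusChange(recorder, node, "NodeReady")
}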

19
vendor/k8s.io/kubernetes/pkg/controller/node/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package node contains code for syncing cloud instances with
// node registry
package node // import "k8s.io/kubernetes/pkg/controller/node"

77
vendor/k8s.io/kubernetes/pkg/controller/node/metrics.go generated vendored Normal file

@@ -0,0 +1,77 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
const (
NodeControllerSubsystem = "node_collector"
ZoneHealthStatisticKey = "zone_health"
ZoneSizeKey = "zone_size"
ZoneNoUnhealthyNodesKey = "unhealty_nodes_in_zone"
EvictionsNumberKey = "evictions_number"
)
var (
ZoneHealth = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneHealthStatisticKey,
Help: "Gauge measuring percentage of healty nodes per zone.",
},
[]string{"zone"},
)
ZoneSize = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneSizeKey,
Help: "Gauge measuring number of registered Nodes per zones.",
},
[]string{"zone"},
)
UnhealthyNodes = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: NodeControllerSubsystem,
Name: ZoneNoUnhealthyNodesKey,
Help: "Gauge measuring number of not Ready Nodes per zones.",
},
[]string{"zone"},
)
EvictionsNumber = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: NodeControllerSubsystem,
Name: EvictionsNumberKey,
Help: "Number of Node evictions that happened since current instance of NodeController started.",
},
[]string{"zone"},
)
)
var registerMetrics sync.Once
func Register() {
registerMetrics.Do(func() {
prometheus.MustRegister(ZoneHealth)
prometheus.MustRegister(ZoneSize)
prometheus.MustRegister(UnhealthyNodes)
prometheus.MustRegister(EvictionsNumber)
})
}
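
A small sketch of how these collectors might be fed; the zone label and the figures are made up, and the real call sites live in nodecontroller.go.

package node

// exampleMetrics registers the collectors once and reports per-zone figures.
func exampleMetrics() {
	Register() // guarded by sync.Once, so repeated calls are safe

	zone := "us-central1-a" // hypothetical zone label
	ZoneHealth.WithLabelValues(zone).Set(95) // 95% of nodes in the zone are healthy
	ZoneSize.WithLabelValues(zone).Set(40)   // 40 registered nodes in the zone
	UnhealthyNodes.WithLabelValues(zone).Set(2)
	EvictionsNumber.WithLabelValues(zone).Inc()
}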

vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go generated vendored Normal file
File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go generated vendored Normal file
File diff suppressed because it is too large

282
vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go generated vendored Normal file

@@ -0,0 +1,282 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"container/heap"
"sync"
"time"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/golang/glog"
)
// TimedValue is a value that should be processed at a designated time.
type TimedValue struct {
Value string
// UID could be anything that helps identify the value
UID interface{}
AddedAt time.Time
ProcessAt time.Time
}
// now is used to test time
var now func() time.Time = time.Now
// TimedQueue is a priority heap where the lowest ProcessAt is at the front of the queue
type TimedQueue []*TimedValue
func (h TimedQueue) Len() int { return len(h) }
func (h TimedQueue) Less(i, j int) bool { return h[i].ProcessAt.Before(h[j].ProcessAt) }
func (h TimedQueue) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *TimedQueue) Push(x interface{}) {
*h = append(*h, x.(*TimedValue))
}
func (h *TimedQueue) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
// A FIFO queue which additionally guarantees that any element can be added only once until
// it is removed.
type UniqueQueue struct {
lock sync.Mutex
queue TimedQueue
set sets.String
}
// Adds a new value to the queue if it wasn't added before, or was explicitly removed by the
// Remove call. Returns true if new value was added.
func (q *UniqueQueue) Add(value TimedValue) bool {
q.lock.Lock()
defer q.lock.Unlock()
if q.set.Has(value.Value) {
return false
}
heap.Push(&q.queue, &value)
q.set.Insert(value.Value)
return true
}
// Replace replaces an existing value in the queue if it already exists, otherwise it does nothing.
// Returns true if the item was found.
func (q *UniqueQueue) Replace(value TimedValue) bool {
q.lock.Lock()
defer q.lock.Unlock()
for i := range q.queue {
if q.queue[i].Value != value.Value {
continue
}
heap.Remove(&q.queue, i)
heap.Push(&q.queue, &value)
return true
}
return false
}
// Removes the value from the queue, but keeps it in the set, so it won't be added a second time.
// Returns true if something was removed.
func (q *UniqueQueue) RemoveFromQueue(value string) bool {
q.lock.Lock()
defer q.lock.Unlock()
if !q.set.Has(value) {
return false
}
for i, val := range q.queue {
if val.Value == value {
heap.Remove(&q.queue, i)
return true
}
}
return false
}
// Removes the value from the queue, so a Get() call won't return it, and allows subsequent addition
// of the given value. If the value is not present, it does nothing and returns false.
func (q *UniqueQueue) Remove(value string) bool {
q.lock.Lock()
defer q.lock.Unlock()
if !q.set.Has(value) {
return false
}
q.set.Delete(value)
for i, val := range q.queue {
if val.Value == value {
heap.Remove(&q.queue, i)
return true
}
}
return true
}
// Returns the oldest added value that wasn't returned yet.
func (q *UniqueQueue) Get() (TimedValue, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if len(q.queue) == 0 {
return TimedValue{}, false
}
result := heap.Pop(&q.queue).(*TimedValue)
q.set.Delete(result.Value)
return *result, true
}
// Head returns the oldest added value that wasn't returned yet without removing it.
func (q *UniqueQueue) Head() (TimedValue, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if len(q.queue) == 0 {
return TimedValue{}, false
}
result := q.queue[0]
return *result, true
}
// Clear removes all items from the queue and duplication preventing set.
func (q *UniqueQueue) Clear() {
q.lock.Lock()
defer q.lock.Unlock()
if q.queue.Len() > 0 {
q.queue = make(TimedQueue, 0)
}
if len(q.set) > 0 {
q.set = sets.NewString()
}
}
// RateLimitedTimedQueue is a unique item priority queue ordered by the expected next time
// of execution. It is also rate limited.
type RateLimitedTimedQueue struct {
queue UniqueQueue
limiterLock sync.Mutex
limiter flowcontrol.RateLimiter
}
// Creates new queue which will use given RateLimiter to oversee execution.
func NewRateLimitedTimedQueue(limiter flowcontrol.RateLimiter) *RateLimitedTimedQueue {
return &RateLimitedTimedQueue{
queue: UniqueQueue{
queue: TimedQueue{},
set: sets.NewString(),
},
limiter: limiter,
}
}
// ActionFunc takes a timed value and returns false if the item must be retried, with an optional
// time.Duration if some minimum wait interval should be used.
type ActionFunc func(TimedValue) (bool, time.Duration)
// Try processes the queue. It ends prematurely if the RateLimiter forbids an action.
// Otherwise, requeues the item to be processed. Each value is processed once if fn returns true,
// otherwise it is added back to the queue. The returned remaining is used to identify the minimum
// time to execute the next item in the queue. The same value is processed only once unless
// Remove is explicitly called on it (it's done by the cancelPodEviction function in NodeController
// when Node becomes Ready again)
// TODO: figure out a good way to do garbage collection for all Nodes that were removed from
// the cluster.
func (q *RateLimitedTimedQueue) Try(fn ActionFunc) {
val, ok := q.queue.Head()
q.limiterLock.Lock()
defer q.limiterLock.Unlock()
for ok {
// rate limit the queue checking
if !q.limiter.TryAccept() {
glog.V(10).Infof("Try rate limited for value: %v", val)
// Try again later
break
}
now := now()
if now.Before(val.ProcessAt) {
break
}
if ok, wait := fn(val); !ok {
val.ProcessAt = now.Add(wait + 1)
q.queue.Replace(val)
} else {
q.queue.RemoveFromQueue(val.Value)
}
val, ok = q.queue.Head()
}
}
// Adds value to the queue to be processed. Won't add the same value (comparison by value) a second time
// if it was already added and not removed.
func (q *RateLimitedTimedQueue) Add(value string, uid interface{}) bool {
now := now()
return q.queue.Add(TimedValue{
Value: value,
UID: uid,
AddedAt: now,
ProcessAt: now,
})
}
// Removes Node from the Evictor. The Node won't be processed until added again.
func (q *RateLimitedTimedQueue) Remove(value string) bool {
return q.queue.Remove(value)
}
// Removes all items from the queue
func (q *RateLimitedTimedQueue) Clear() {
q.queue.Clear()
}
// SwapLimiter safely swaps current limiter for this queue with the passed one if capacities or qps's differ.
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
q.limiterLock.Lock()
defer q.limiterLock.Unlock()
if q.limiter.QPS() == newQPS {
return
}
var newLimiter flowcontrol.RateLimiter
if newQPS <= 0 {
newLimiter = flowcontrol.NewFakeNeverRateLimiter()
} else {
newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, evictionRateLimiterBurst)
}
// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
// - saturation (percentage of used tokens)
// - number of used tokens
// - number of available tokens
// - something else
for q.limiter.Saturation() > newLimiter.Saturation() {
// Check if we're not using fake limiter
previousSaturation := newLimiter.Saturation()
newLimiter.TryAccept()
// It's a fake limiter
if newLimiter.Saturation() == previousSaturation {
break
}
}
q.limiter.Stop()
q.limiter = newLimiter
}
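
To make the retry semantics of Try concrete, a minimal in-package sketch; the node names, UIDs and rate limits are made up.

package node

import (
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

// exampleEvictor queues two nodes and drains the queue, asking for
// "node-b" to be retried ten seconds later.
func exampleEvictor() {
	evictor := NewRateLimitedTimedQueue(flowcontrol.NewTokenBucketRateLimiter(0.1, 10))
	evictor.Add("node-a", "uid-a")
	evictor.Add("node-b", "uid-b")

	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		if value.Value == "node-b" {
			return false, 10 * time.Second // requeue node-b with a later ProcessAt
		}
		return true, 0 // node-a handled; remove it from the queue
	})
}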

334
vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go generated vendored Normal file

@@ -0,0 +1,334 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
)
func CheckQueueEq(lhs []string, rhs TimedQueue) bool {
for i := 0; i < len(lhs); i++ {
if rhs[i].Value != lhs[i] {
return false
}
}
return true
}
func CheckSetEq(lhs, rhs sets.String) bool {
return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
func TestAddNode(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
queuePattern := []string{"first", "second", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern := sets.NewString("first", "second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
}
func TestDelNode(t *testing.T) {
defer func() { now = time.Now }()
var tick int64
now = func() time.Time {
t := time.Unix(tick, 0)
tick++
return t
}
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("first")
queuePattern := []string{"second", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern := sets.NewString("second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
queuePattern = []string{"first", "third"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern = sets.NewString("first", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("third")
queuePattern = []string{"first", "second"}
if len(evictor.queue.queue) != len(queuePattern) {
t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
}
if !CheckQueueEq(queuePattern, evictor.queue.queue) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
setPattern = sets.NewString("first", "second")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, evictor.queue.set) {
t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
}
}
func TestTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
deletedMap := sets.NewString()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
deletedMap.Insert(value.Value)
return true, 0
})
setPattern := sets.NewString("first", "third")
if len(deletedMap) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, deletedMap) {
t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
}
}
func TestTryOrdering(t *testing.T) {
defer func() { now = time.Now }()
current := time.Unix(0, 0)
delay := 0
// the current time is incremented by 1ms every time now is invoked
now = func() time.Time {
if delay > 0 {
delay--
} else {
current = current.Add(time.Millisecond)
}
t.Logf("time %d", current.UnixNano())
return current
}
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
order := []string{}
count := 0
hasQueued := false
evictor.Try(func(value TimedValue) (bool, time.Duration) {
count++
t.Logf("eviction %d", count)
if value.ProcessAt.IsZero() {
t.Fatalf("processAt should not be zero")
}
switch value.Value {
case "first":
if !value.AddedAt.Equal(time.Unix(0, time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
case "second":
if !value.AddedAt.Equal(time.Unix(0, 2*time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
if hasQueued {
if !value.ProcessAt.Equal(time.Unix(0, 6*time.Millisecond.Nanoseconds())) {
t.Fatalf("process time for %s is %d", value.Value, value.ProcessAt)
}
break
}
hasQueued = true
delay = 1
t.Logf("going to delay")
return false, 2 * time.Millisecond
case "third":
if !value.AddedAt.Equal(time.Unix(0, 3*time.Millisecond.Nanoseconds())) {
t.Fatalf("added time for %s is %d", value.Value, value.AddedAt)
}
}
order = append(order, value.Value)
return true, 0
})
if !reflect.DeepEqual(order, []string{"first", "third"}) {
t.Fatalf("order was wrong: %v", order)
}
if count != 3 {
t.Fatalf("unexpected iterations: %d", count)
}
}
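// Illustrative sketch of the clock seam the tests above rely on: the queue
// reads the package-level `now` variable instead of calling time.Now
// directly, so a test can install a deterministic fake and restore the real
// clock when it is done. The helper name is hypothetical and is not used by
// the tests in this file.
func withFakeClock(fn func()) {
	defer func() { now = time.Now }()
	var tick int64
	now = func() time.Time {
		tick++
		return time.Unix(tick, 0) // advance one second per call
	}
	fn()
}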
func TestTryRemovingWhileTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
processing := make(chan struct{})
wait := make(chan struct{})
order := []string{}
count := 0
queued := false
// while the Try function is processing "second", remove it from the queue
// we should not see "second" retried.
go func() {
<-processing
evictor.Remove("second")
close(wait)
}()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
count++
if value.AddedAt.IsZero() {
t.Fatalf("added should not be zero")
}
if value.ProcessAt.IsZero() {
t.Fatalf("next should not be zero")
}
if !queued && value.Value == "second" {
queued = true
close(processing)
<-wait
return false, time.Millisecond
}
order = append(order, value.Value)
return true, 0
})
if !reflect.DeepEqual(order, []string{"first", "third"}) {
t.Fatalf("order was wrong: %v", order)
}
if count != 3 {
t.Fatalf("unexpected iterations: %d", count)
}
}
func TestClear(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Clear()
if len(evictor.queue.queue) != 0 {
t.Fatalf("Clear should remove all elements from the queue.")
}
}
func TestSwapLimiter(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
fakeAlways := flowcontrol.NewFakeAlwaysRateLimiter()
qps := evictor.limiter.QPS()
if qps != fakeAlways.QPS() {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, fakeAlways.QPS())
}
evictor.SwapLimiter(0)
qps = evictor.limiter.QPS()
fakeNever := flowcontrol.NewFakeNeverRateLimiter()
if qps != fakeNever.QPS() {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, fakeNever.QPS())
}
createdQPS := float32(5.5)
evictor.SwapLimiter(createdQPS)
qps = evictor.limiter.QPS()
if qps != createdQPS {
t.Fatalf("QPS does not match create one: %v instead of %v", qps, createdQPS)
}
}
func TestAddAfterTry(t *testing.T) {
evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
evictor.Add("first", "11111")
evictor.Add("second", "22222")
evictor.Add("third", "33333")
evictor.Remove("second")
deletedMap := sets.NewString()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
deletedMap.Insert(value.Value)
return true, 0
})
setPattern := sets.NewString("first", "third")
if len(deletedMap) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
if !CheckSetEq(setPattern, deletedMap) {
t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
}
evictor.Add("first", "11111")
evictor.Try(func(value TimedValue) (bool, time.Duration) {
t.Errorf("We shouldn't process the same value if the explicit remove wasn't called.")
return true, 0
})
}
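Taken together, the tests above fix the caller-facing contract of the queue: Add is a no-op for a value that is already queued, Try visits each queued value at most once per pass, and returning (false, wait) from the action reschedules the value instead of dropping it. A sketch of that calling pattern, using only the API exercised above; the helper, node names, and retry interval are illustrative:

func exampleTryLoop() {
	evictPods := func(name string) error { return nil } // stand-in for real per-node work
	q := NewRateLimitedTimedQueue(flowcontrol.NewTokenBucketRateLimiter(10, 1))
	q.Add("node-a", "uid-a")
	q.Add("node-b", "uid-b")
	q.Try(func(value TimedValue) (bool, time.Duration) {
		if err := evictPods(value.Value); err != nil {
			return false, 10 * time.Second // keep the value queued and retry later
		}
		return true, 0 // done; drop the value from the queue
	})
}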

332
vendor/k8s.io/kubernetes/pkg/controller/node/test_utils.go generated vendored Normal file
View file

@ -0,0 +1,332 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"errors"
"fmt"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/watch"
)
// FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
// allows test cases to have fine-grained control over mock behaviors. We also need
// PodsInterface and PodInterface to test listing & deleting pods, which is implemented in
// the embedded client.Fake field.
type FakeNodeHandler struct {
*fake.Clientset
// Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *v1.Node) bool
Existing []*v1.Node
// Output
CreatedNodes []*v1.Node
DeletedNodes []*v1.Node
UpdatedNodes []*v1.Node
UpdatedNodeStatuses []*v1.Node
RequestCount int
// Synchronization
lock sync.Mutex
deleteWaitChan chan struct{}
}
type FakeLegacyHandler struct {
v1core.CoreV1Interface
n *FakeNodeHandler
}
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes {
updatedNodesCopy[i] = ptr
}
return updatedNodesCopy
}
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
}
func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n
}
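// Illustrative sketch, not referenced by the tests in this package: a
// FakeNodeHandler is typically constructed with a pre-populated Existing
// slice plus an embedded fake Clientset, and then handed to code expecting
// a CoreV1Interface via Core().Nodes(). The helper name and node name are
// hypothetical.
func newExampleNodeHandler() *FakeNodeHandler {
	return &FakeNodeHandler{
		Existing:  []*v1.Node{newNode("node0")},
		Clientset: fake.NewSimpleClientset(),
	}
}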
func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
for _, n := range m.Existing {
if n.Name == node.Name {
return nil, apierrors.NewAlreadyExists(api.Resource("nodes"), node.Name)
}
}
if m.CreateHook == nil || m.CreateHook(m, node) {
nodeCopy := *node
m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
return node, nil
} else {
return nil, errors.New("Create error.")
}
}
func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
for i := range m.UpdatedNodes {
if m.UpdatedNodes[i].Name == name {
nodeCopy := *m.UpdatedNodes[i]
return &nodeCopy, nil
}
}
for i := range m.Existing {
if m.Existing[i].Name == name {
nodeCopy := *m.Existing[i]
return &nodeCopy, nil
}
}
return nil, nil
}
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
var nodes []*v1.Node
for i := 0; i < len(m.UpdatedNodes); i++ {
if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
nodes = append(nodes, m.UpdatedNodes[i])
}
}
for i := 0; i < len(m.Existing); i++ {
if !contains(m.Existing[i], m.DeletedNodes) && !contains(m.Existing[i], nodes) {
nodes = append(nodes, m.Existing[i])
}
}
for i := 0; i < len(m.CreatedNodes); i++ {
if !contains(m.CreatedNodes[i], m.DeletedNodes) && !contains(m.CreatedNodes[i], nodes) {
nodes = append(nodes, m.CreatedNodes[i])
}
}
nodeList := &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node)
}
return nodeList, nil
}
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
if m.deleteWaitChan != nil {
m.deleteWaitChan <- struct{}{}
}
m.lock.Unlock()
}()
m.DeletedNodes = append(m.DeletedNodes, newNode(id))
return nil
}
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil
}
func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
nodeCopy := *node
for i, updateNode := range m.UpdatedNodes {
if updateNode.Name == nodeCopy.Name {
m.UpdatedNodes[i] = &nodeCopy
return node, nil
}
}
m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)
return node, nil
}
func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
nodeCopy := *node
m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
return node, nil
}
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++
return &v1.Node{}, nil
}
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}
// FakeRecorder is used as a fake during testing.
type FakeRecorder struct {
source v1.EventSource
events []*v1.Event
clock clock.Clock
}
func (f *FakeRecorder) Event(obj runtime.Object, eventtype, reason, message string) {
f.generateEvent(obj, metav1.Now(), eventtype, reason, message)
}
func (f *FakeRecorder) Eventf(obj runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
f.Event(obj, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) {
ref, err := v1.GetReference(obj)
if err != nil {
return
}
event := f.makeEvent(ref, eventtype, reason, message)
event.Source = f.source
if f.events != nil {
fmt.Println("write event")
f.events = append(f.events, event)
}
}
func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
fmt.Println("make event")
t := metav1.Time{Time: f.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = v1.NamespaceDefault
}
return &v1.Event{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
},
InvolvedObject: *ref,
Reason: reason,
Message: message,
FirstTimestamp: t,
LastTimestamp: t,
Count: 1,
Type: eventtype,
}
}
func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: v1.EventSource{Component: "nodeControllerTest"},
events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()),
}
}
func newNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
ExternalID: name,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
}
}
func newPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: v1.PodSpec{
NodeName: host,
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
}
return pod
}
func contains(node *v1.Node, nodes []*v1.Node) bool {
for i := 0; i < len(nodes); i++ {
if node.Name == nodes[i].Name {
return true
}
}
return false
}
// getZones returns the list of zones for all Nodes stored in the FakeNodeHandler.
func getZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node))
}
return zones.List()
}
func createZoneID(region, zone string) string {
return region + ":\x00:" + zone
}
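getZones pairs with createZoneID: utilnode.GetZoneKey joins a node's failure-domain region and zone labels with the same ":\x00:" separator, so a test can build the expected keys up front and compare zone sets. A sketch of that round trip, assuming the beta failure-domain label keys this release reads; the helper name, region, and zone values are illustrative:

func exampleZones() []string {
	node := newNode("node0")
	node.ObjectMeta.Labels = map[string]string{
		"failure-domain.beta.kubernetes.io/region": "region1",
		"failure-domain.beta.kubernetes.io/zone":   "zone1",
	}
	handler := &FakeNodeHandler{
		Existing:  []*v1.Node{node},
		Clientset: fake.NewSimpleClientset(),
	}
	// With the labels above, the single returned zone equals
	// createZoneID("region1", "zone1").
	return getZones(handler)
}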