Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion


@@ -0,0 +1,17 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["doc.go"],
tags = ["automanaged"],
)


@@ -0,0 +1,4 @@
assignees:
- quinton-hoole
- nikhiljindal
- madhusudancs


@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"cluster_client.go",
"clustercontroller.go",
"doc.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/cache:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/typed/discovery:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["clustercontroller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/util/uuid:go_default_library",
],
)


@@ -0,0 +1,169 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"strings"
"github.com/golang/glog"
federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/util/sets"
)
const (
UserAgentName = "Cluster-Controller"
KubeAPIQPS = 20.0
KubeAPIBurst = 30
KubeconfigSecretDataKey = "kubeconfig"
)
type ClusterClient struct {
discoveryClient *discovery.DiscoveryClient
kubeClient *clientset.Clientset
}
func NewClusterClientSet(c *federation_v1beta1.Cluster) (*ClusterClient, error) {
clusterConfig, err := util.BuildClusterConfig(c)
if err != nil {
return nil, err
}
var clusterClientSet = ClusterClient{}
if clusterConfig != nil {
clusterClientSet.discoveryClient = discovery.NewDiscoveryClientForConfigOrDie((restclient.AddUserAgent(clusterConfig, UserAgentName)))
if clusterClientSet.discoveryClient == nil {
return nil, nil
}
clusterClientSet.kubeClient = clientset.NewForConfigOrDie((restclient.AddUserAgent(clusterConfig, UserAgentName)))
if clusterClientSet.kubeClient == nil {
return nil, nil
}
}
return &clusterClientSet, nil
}
// GetClusterHealthStatus gets the kubernetes cluster health status by requesting "/healthz"
func (self *ClusterClient) GetClusterHealthStatus() *federation_v1beta1.ClusterStatus {
clusterStatus := federation_v1beta1.ClusterStatus{}
currentTime := metav1.Now()
newClusterReadyCondition := federation_v1beta1.ClusterCondition{
Type: federation_v1beta1.ClusterReady,
Status: v1.ConditionTrue,
Reason: "ClusterReady",
Message: "/healthz responded with ok",
LastProbeTime: currentTime,
LastTransitionTime: currentTime,
}
newClusterNotReadyCondition := federation_v1beta1.ClusterCondition{
Type: federation_v1beta1.ClusterReady,
Status: v1.ConditionFalse,
Reason: "ClusterNotReady",
Message: "/healthz responded without ok",
LastProbeTime: currentTime,
LastTransitionTime: currentTime,
}
newNodeOfflineCondition := federation_v1beta1.ClusterCondition{
Type: federation_v1beta1.ClusterOffline,
Status: v1.ConditionTrue,
Reason: "ClusterNotReachable",
Message: "cluster is not reachable",
LastProbeTime: currentTime,
LastTransitionTime: currentTime,
}
newNodeNotOfflineCondition := federation_v1beta1.ClusterCondition{
Type: federation_v1beta1.ClusterOffline,
Status: v1.ConditionFalse,
Reason: "ClusterReachable",
Message: "cluster is reachable",
LastProbeTime: currentTime,
LastTransitionTime: currentTime,
}
body, err := self.discoveryClient.RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err != nil {
clusterStatus.Conditions = append(clusterStatus.Conditions, newNodeOfflineCondition)
} else {
if !strings.EqualFold(string(body), "ok") {
clusterStatus.Conditions = append(clusterStatus.Conditions, newClusterNotReadyCondition, newNodeNotOfflineCondition)
} else {
clusterStatus.Conditions = append(clusterStatus.Conditions, newClusterReadyCondition)
}
}
return &clusterStatus
}
// GetClusterZones gets the kubernetes cluster zones and region by inspecting labels on nodes in the cluster.
func (self *ClusterClient) GetClusterZones() (zones []string, region string, err error) {
return getZoneNames(self.kubeClient)
}
// Find the name of the zone in which a Node is running
func getZoneNameForNode(node api.Node) (string, error) {
for key, value := range node.Labels {
if key == metav1.LabelZoneFailureDomain {
return value, nil
}
}
return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
node.Name, metav1.LabelZoneFailureDomain)
}
// Find the name of the region in which a Node is running
func getRegionNameForNode(node api.Node) (string, error) {
for key, value := range node.Labels {
if key == metav1.LabelZoneRegion {
return value, nil
}
}
return "", fmt.Errorf("Region name for node %s not found. No label with key %s",
node.Name, metav1.LabelZoneRegion)
}
// Find the names of all zones and the region in which we have nodes in this cluster.
func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) {
zoneNames := sets.NewString()
nodes, err := client.Core().Nodes().List(api.ListOptions{})
if err != nil {
glog.Errorf("Failed to list nodes while getting zone names: %v", err)
return nil, "", err
}
for i, node := range nodes.Items {
// TODO: quinton-hoole make this more efficient.
// For non-multi-zone clusters the zone will
// be identical for all nodes, so we only need to look at one node
// For multi-zone clusters we know at build time
// which zones are included. Rather get this info from there, because it's cheaper.
zoneName, err := getZoneNameForNode(node)
if err != nil {
return nil, "", err
}
zoneNames.Insert(zoneName)
if i == 0 {
region, err = getRegionNameForNode(node)
if err != nil {
return nil, "", err
}
}
}
return zoneNames.List(), region, nil
}
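
As an editorial aside, here is a minimal usage sketch (not part of this commit) of the ClusterClient defined above. It assumes it lives in the same cluster package; the probeCluster helper name and the log messages are illustrative assumptions.
// Hypothetical sketch (not part of this commit): probing a member cluster with ClusterClient.
// c is an assumed *federation_v1beta1.Cluster describing the member cluster.
func probeCluster(c *federation_v1beta1.Cluster) {
	clusterClient, err := NewClusterClientSet(c)
	if err != nil || clusterClient == nil {
		glog.Errorf("could not build cluster client: %v", err)
		return
	}
	status := clusterClient.GetClusterHealthStatus()      // probes /healthz and records ready/offline conditions
	zones, region, err := clusterClient.GetClusterZones() // reads zone and region labels from the cluster's nodes
	if err != nil {
		glog.Warningf("could not determine zones: %v", err)
	}
	glog.V(4).Infof("conditions=%v zones=%v region=%s", status.Conditions, zones, region)
}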


@@ -0,0 +1,209 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"strings"
"time"
"github.com/golang/glog"
federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
clustercache "k8s.io/kubernetes/federation/client/cache"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch"
)
type ClusterController struct {
knownClusterSet sets.String
// federationClient used to operate cluster
federationClient federationclientset.Interface
// clusterMonitorPeriod is the period for updating status of cluster
clusterMonitorPeriod time.Duration
// clusterClusterStatusMap is a mapping of clusterName and cluster status of last sampling
clusterClusterStatusMap map[string]federationv1beta1.ClusterStatus
// clusterKubeClientMap is a mapping of clusterName and restclient
clusterKubeClientMap map[string]ClusterClient
// cluster framework and store
clusterController *cache.Controller
clusterStore clustercache.StoreToClusterLister
}
// NewclusterController returns a new cluster controller
func NewclusterController(federationClient federationclientset.Interface, clusterMonitorPeriod time.Duration) *ClusterController {
cc := &ClusterController{
knownClusterSet: make(sets.String),
federationClient: federationClient,
clusterMonitorPeriod: clusterMonitorPeriod,
clusterClusterStatusMap: make(map[string]federationv1beta1.ClusterStatus),
clusterKubeClientMap: make(map[string]ClusterClient),
}
cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return cc.federationClient.Federation().Clusters().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return cc.federationClient.Federation().Clusters().Watch(options)
},
},
&federationv1beta1.Cluster{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: cc.delFromClusterSet,
AddFunc: cc.addToClusterSet,
},
)
return cc
}
// delFromClusterSet deletes a cluster from the clusterSet and
// removes the corresponding restclient from the clusterKubeClientMap map
func (cc *ClusterController) delFromClusterSet(obj interface{}) {
cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Delete(cluster.Name)
delete(cc.clusterKubeClientMap, cluster.Name)
}
// addToClusterSet inserts the new cluster into the clusterSet and creates a corresponding
// restclient entry in the clusterKubeClientMap map
func (cc *ClusterController) addToClusterSet(obj interface{}) {
cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Insert(cluster.Name)
// create the restclient of cluster
restClient, err := NewClusterClientSet(cluster)
if err != nil || restClient == nil {
glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
return
}
cc.clusterKubeClientMap[cluster.Name] = *restClient
}
// Run begins watching and syncing.
func (cc *ClusterController) Run() {
defer utilruntime.HandleCrash()
go cc.clusterController.Run(wait.NeverStop)
// monitor cluster status periodically, in phase 1 we just get the health state from "/healthz"
go wait.Until(func() {
if err := cc.UpdateClusterStatus(); err != nil {
glog.Errorf("Error monitoring cluster status: %v", err)
}
}, cc.clusterMonitorPeriod, wait.NeverStop)
}
func (cc *ClusterController) GetClusterStatus(cluster *federationv1beta1.Cluster) (*federationv1beta1.ClusterStatus, error) {
// just get the status of cluster, by requesting the restapi "/healthz"
clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
if !found {
glog.Infof("It's a new cluster, a cluster client will be created")
client, err := NewClusterClientSet(cluster)
if err != nil || client == nil {
glog.Errorf("Failed to create cluster client, err: %v", err)
return nil, err
}
clusterClient = *client
cc.clusterKubeClientMap[cluster.Name] = clusterClient
}
clusterStatus := clusterClient.GetClusterHealthStatus()
return clusterStatus, nil
}
// UpdateClusterStatus checks cluster status and gets the metrics from the cluster's restapi
func (cc *ClusterController) UpdateClusterStatus() error {
clusters, err := cc.federationClient.Federation().Clusters().List(v1.ListOptions{})
if err != nil {
return err
}
for _, cluster := range clusters.Items {
if !cc.knownClusterSet.Has(cluster.Name) {
glog.V(1).Infof("ClusterController observed a new cluster: %#v", cluster)
cc.knownClusterSet.Insert(cluster.Name)
}
}
// If the number of known clusters differs from the number of observed clusters, prune clusters that no longer exist
if len(cc.knownClusterSet) != len(clusters.Items) {
observedSet := make(sets.String)
for _, cluster := range clusters.Items {
observedSet.Insert(cluster.Name)
}
deleted := cc.knownClusterSet.Difference(observedSet)
for clusterName := range deleted {
glog.V(1).Infof("ClusterController observed a Cluster deletion: %v", clusterName)
cc.knownClusterSet.Delete(clusterName)
}
}
for _, cluster := range clusters.Items {
clusterStatusNew, err := cc.GetClusterStatus(&cluster)
if err != nil {
glog.Infof("Failed to Get the status of cluster: %v", cluster.Name)
continue
}
clusterStatusOld, found := cc.clusterClusterStatusMap[cluster.Name]
if !found {
glog.Infof("There is no status stored for cluster: %v before", cluster.Name)
} else {
hasTransition := false
for i := 0; i < len(clusterStatusNew.Conditions); i++ {
if !(strings.EqualFold(string(clusterStatusNew.Conditions[i].Type), string(clusterStatusOld.Conditions[i].Type)) &&
strings.EqualFold(string(clusterStatusNew.Conditions[i].Status), string(clusterStatusOld.Conditions[i].Status))) {
hasTransition = true
break
}
}
if !hasTransition {
for j := 0; j < len(clusterStatusNew.Conditions); j++ {
clusterStatusNew.Conditions[j].LastTransitionTime = clusterStatusOld.Conditions[j].LastTransitionTime
}
}
}
clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
if !found {
glog.Warningf("Failed to client for cluster %s", cluster.Name)
continue
}
zones, region, err := clusterClient.GetClusterZones()
if err != nil {
glog.Warningf("Failed to get zones and region for cluster %s: %v", cluster.Name, err)
// Don't return err here, as we want the rest of the status update to proceed.
} else {
clusterStatusNew.Zones = zones
clusterStatusNew.Region = region
}
cc.clusterClusterStatusMap[cluster.Name] = *clusterStatusNew
cluster.Status = *clusterStatusNew
_, err = cc.federationClient.Federation().Clusters().UpdateStatus(&cluster)
if err != nil {
glog.Warningf("Failed to update the status of cluster %v: %v", cluster.Name, err)
// Don't return err here, as we want to continue processing remaining clusters.
continue
}
}
return nil
}
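
A brief sketch, under assumed setup, of how this cluster controller would typically be wired and started inside the same package; cfg, the helper name, and the 40-second monitor period are assumptions, while the clientset construction mirrors the test below.
// Hypothetical sketch (not part of this commit): wiring and starting the cluster controller.
// cfg is an assumed *restclient.Config pointing at the federation API server.
func startClusterController(cfg *restclient.Config) {
	federationClient := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, UserAgentName))
	controller := NewclusterController(federationClient, 40*time.Second) // monitor period is an assumed value
	controller.Run() // returns immediately; the informer and the periodic UpdateClusterStatus loop run in goroutines
}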


@@ -0,0 +1,151 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
controllerutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/util/uuid"
)
func newCluster(clusterName string, serverUrl string) *federationv1beta1.Cluster {
cluster := federationv1beta1.Cluster{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: clusterName,
},
Spec: federationv1beta1.ClusterSpec{
ServerAddressByClientCIDRs: []federationv1beta1.ServerAddressByClientCIDR{
{
ClientCIDR: "0.0.0.0/0",
ServerAddress: serverUrl,
},
},
},
}
return &cluster
}
func newClusterList(cluster *federationv1beta1.Cluster) *federationv1beta1.ClusterList {
clusterList := federationv1beta1.ClusterList{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ListMeta: metav1.ListMeta{
SelfLink: "foobar",
},
Items: []federationv1beta1.Cluster{},
}
clusterList.Items = append(clusterList.Items, *cluster)
return &clusterList
}
// createHttptestFakeHandlerForFederation builds a fake http handler that simulates a federation apiserver and responds to "PUT" and "GET" requests
// when "canBeGotten" is false, the cluster list cannot be fetched from the apiserver
func createHttptestFakeHandlerForFederation(clusterList *federationv1beta1.ClusterList, canBeGotten bool) *http.HandlerFunc {
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
clusterListString, _ := json.Marshal(*clusterList)
w.Header().Set("Content-Type", "application/json")
switch r.Method {
case "PUT":
fmt.Fprintln(w, string(clusterListString))
case "GET":
if canBeGotten {
fmt.Fprintln(w, string(clusterListString))
} else {
fmt.Fprintln(w, "")
}
default:
fmt.Fprintln(w, "")
}
})
return &fakeHandler
}
// createHttptestFakeHandlerForCluster builds a fake http handler that simulates a cluster apiserver's "/healthz" endpoint
// when "canBeGotten" is false, the apiserver returns no successful response
func createHttptestFakeHandlerForCluster(canBeGotten bool) *http.HandlerFunc {
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
switch r.Method {
case "GET":
if canBeGotten {
fmt.Fprintln(w, "ok")
} else {
w.WriteHeader(http.StatusNotFound)
}
default:
fmt.Fprintln(w, "")
}
})
return &fakeHandler
}
func TestUpdateClusterStatusOK(t *testing.T) {
clusterName := "foobarCluster"
// create dummy httpserver
testClusterServer := httptest.NewServer(createHttptestFakeHandlerForCluster(true))
defer testClusterServer.Close()
federationCluster := newCluster(clusterName, testClusterServer.URL)
federationClusterList := newClusterList(federationCluster)
testFederationServer := httptest.NewServer(createHttptestFakeHandlerForFederation(federationClusterList, true))
defer testFederationServer.Close()
restClientCfg, err := clientcmd.BuildConfigFromFlags(testFederationServer.URL, "")
if err != nil {
t.Errorf("Failed to build client config")
}
federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))
// Override KubeconfigGetterForCluster to avoid having to setup service accounts and mount files with secret tokens.
originalGetter := controllerutil.KubeconfigGetterForCluster
controllerutil.KubeconfigGetterForCluster = func(c *federationv1beta1.Cluster) clientcmd.KubeconfigGetter {
return func() (*clientcmdapi.Config, error) {
return &clientcmdapi.Config{}, nil
}
}
manager := NewclusterController(federationClientSet, 5)
err = manager.UpdateClusterStatus()
if err != nil {
t.Errorf("Failed to Update Cluster Status: %v", err)
}
clusterStatus, found := manager.clusterClusterStatusMap[clusterName]
if !found {
t.Errorf("Failed to Update Cluster Status")
} else {
if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federationv1beta1.ClusterOffline) {
t.Errorf("Failed to Update Cluster Status")
}
}
// Reset KubeconfigGetterForCluster
controllerutil.KubeconfigGetterForCluster = originalGetter
}


@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cluster contains code for syncing cluster
package cluster // import "k8s.io/kubernetes/federation/pkg/federation-controller/cluster"


@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["configmap_controller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["configmap_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)


@@ -0,0 +1,315 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import (
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
const (
allClustersKey = "ALL_CLUSTERS"
)
type ConfigMapController struct {
// For triggering single configmap reconciliation. This is used when there is an
// add/update/delete operation on a configmap in either federated API server or
// in some member of the federation.
configmapDeliverer *util.DelayingDeliverer
// For triggering all configmaps reconciliation. This is used when
// a new cluster becomes available.
clusterDeliverer *util.DelayingDeliverer
// Contains configmaps present in members of federation.
configmapFederatedInformer util.FederatedInformer
// For updating members of federation.
federatedUpdater util.FederatedUpdater
// Definitions of configmaps that should be federated.
configmapInformerStore cache.Store
// Informer controller for configmaps that should be federated.
configmapInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federationclientset.Interface
// Backoff manager for configmaps
configmapBackoff *flowcontrol.Backoff
// For events
eventRecorder record.EventRecorder
configmapReviewDelay time.Duration
clusterAvailableDelay time.Duration
smallDelay time.Duration
updateTimeout time.Duration
}
// NewConfigMapController returns a new configmap controller
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})
configmapcontroller := &ConfigMapController{
federatedApiClient: client,
configmapReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
configmapBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
configmapcontroller.configmapDeliverer = util.NewDelayingDeliverer()
configmapcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer on federated API servers on configmaps that should be federated.
configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
// Federated informer on configmaps in members of federation.
configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some configmap operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the configmaps again.
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
return err
})
return configmapcontroller
}
func (configmapcontroller *ConfigMapController) Run(stopChan <-chan struct{}) {
go configmapcontroller.configmapInformerController.Run(stopChan)
configmapcontroller.configmapFederatedInformer.Start()
go func() {
<-stopChan
configmapcontroller.configmapFederatedInformer.Stop()
}()
configmapcontroller.configmapDeliverer.StartWithHandler(func(item *util.DelayingDelivererItem) {
configmap := item.Value.(*types.NamespacedName)
configmapcontroller.reconcileConfigMap(*configmap)
})
configmapcontroller.clusterDeliverer.StartWithHandler(func(_ *util.DelayingDelivererItem) {
configmapcontroller.reconcileConfigMapsOnClusterChange()
})
util.StartBackoffGC(configmapcontroller.configmapBackoff, stopChan)
}
func (configmapcontroller *ConfigMapController) deliverConfigMapObj(obj interface{}, delay time.Duration, failed bool) {
configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name}, delay, failed)
}
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (configmapcontroller *ConfigMapController) deliverConfigMap(configmap types.NamespacedName, delay time.Duration, failed bool) {
key := configmap.String()
if failed {
configmapcontroller.configmapBackoff.Next(key, time.Now())
delay = delay + configmapcontroller.configmapBackoff.Get(key)
} else {
configmapcontroller.configmapBackoff.Reset(key)
}
configmapcontroller.configmapDeliverer.DeliverAfter(key, &configmap, delay)
}
// Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet
// synced with the corresponding api server.
func (configmapcontroller *ConfigMapController) isSynced() bool {
if !configmapcontroller.configmapFederatedInformer.ClustersSynced() {
glog.V(2).Infof("Cluster list not synced")
return false
}
clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get ready clusters: %v", err)
return false
}
if !configmapcontroller.configmapFederatedInformer.GetTargetStore().ClustersSynced(clusters) {
return false
}
return true
}
// The function triggers reconciliation of all federated configmaps.
func (configmapcontroller *ConfigMapController) reconcileConfigMapsOnClusterChange() {
if !configmapcontroller.isSynced() {
glog.V(4).Infof("Configmap controller not synced")
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
}
for _, obj := range configmapcontroller.configmapInformerStore.List() {
configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name},
configmapcontroller.smallDelay, false)
}
}
func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap types.NamespacedName) {
if !configmapcontroller.isSynced() {
glog.V(4).Infof("Configmap controller not synced")
configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
return
}
key := configmap.String()
baseConfigMapObj, exist, err := configmapcontroller.configmapInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main configmap store for %v: %v", key, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
if !exist {
// Not federated configmap, ignoring.
glog.V(8).Infof("Skipping not federated config map: %s", key)
return
}
baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)
clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v, retrying shortly", err)
configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterConfigMapObj, found, err := configmapcontroller.configmapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v, retrying shortly", key, cluster.Name, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
// Do not modify data.
desiredConfigMap := &apiv1.ConfigMap{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
Data: baseConfigMap.Data,
}
if !found {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "CreateInCluster",
"Creating configmap in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredConfigMap,
ClusterName: cluster.Name,
})
} else {
clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)
// Update existing configmap, if needed.
if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInCluster",
"Updating configmap in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredConfigMap,
ClusterName: cluster.Name,
})
}
}
}
if len(operations) == 0 {
// Everything is in order
glog.V(8).Infof("No operations needed for %s", key)
return
}
err = configmapcontroller.federatedUpdater.UpdateWithOnError(operations, configmapcontroller.updateTimeout,
func(op util.FederatedOperation, operror error) {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInClusterFailed",
"ConfigMap update in cluster %s failed: %v", op.ClusterName, operror)
})
if err != nil {
glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
}
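
For context, a minimal sketch (not part of this commit) of starting the configmap controller defined above, assuming it lives in the same configmap package; the helper name and parameters are illustrative assumptions.
// Hypothetical sketch (not part of this commit): starting the configmap controller.
// client is an assumed federationclientset.Interface; stopChan is owned by the caller.
func startConfigMapController(client federationclientset.Interface, stopChan chan struct{}) {
	configmapController := NewConfigMapController(client)
	configmapController.Run(stopChan) // non-blocking: informers and deliverers run in goroutines
	// the caller later calls close(stopChan) to stop the federated informer
}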


@@ -0,0 +1,142 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import (
"fmt"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestConfigMapController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("configmaps", &fakeClient.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
configmapWatch := RegisterFakeWatch("configmaps", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
RegisterFakeList("configmaps", &cluster1Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
RegisterFakeList("configmaps", &cluster2Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster2Client.Fake, cluster2Watch)
configmapController := NewConfigMapController(fakeClient)
informer := ToFederatedInformerForTestOnly(configmapController.configmapFederatedInformer)
informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
})
configmapController.clusterAvailableDelay = time.Second
configmapController.configmapReviewDelay = 50 * time.Millisecond
configmapController.smallDelay = 20 * time.Millisecond
configmapController.updateTimeout = 5 * time.Second
stop := make(chan struct{})
configmapController.Run(stop)
configmap1 := &apiv1.ConfigMap{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-configmap",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/configmaps/test-configmap",
},
Data: map[string]string{
"A": "ala ma kota",
"B": "quick brown fox",
},
}
// Test add federated configmap.
configmapWatch.Add(configmap1)
createdConfigMap := GetConfigMapFromChan(cluster1CreateChan)
assert.NotNil(t, createdConfigMap)
assert.Equal(t, configmap1.Namespace, createdConfigMap.Namespace)
assert.Equal(t, configmap1.Name, createdConfigMap.Name)
assert.True(t, util.ConfigMapEquivalent(configmap1, createdConfigMap))
// Wait for the configmap to appear in the informer store
err := WaitForStoreUpdate(
configmapController.configmapFederatedInformer.GetTargetStore(),
cluster1.Name, types.NamespacedName{Namespace: configmap1.Namespace, Name: configmap1.Name}.String(), wait.ForeverTestTimeout)
assert.Nil(t, err, "configmap should have appeared in the informer store")
// Test update federated configmap.
configmap1.Annotations = map[string]string{
"A": "B",
}
configmapWatch.Modify(configmap1)
updatedConfigMap := GetConfigMapFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedConfigMap)
assert.Equal(t, configmap1.Name, updatedConfigMap.Name)
assert.Equal(t, configmap1.Namespace, updatedConfigMap.Namespace)
assert.True(t, util.ConfigMapEquivalent(configmap1, updatedConfigMap))
// Test a second update of the federated configmap.
configmap1.Data = map[string]string{
"config": "myconfigurationfile",
}
configmapWatch.Modify(configmap1)
updatedConfigMap2 := GetConfigMapFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedConfigMap2)
assert.Equal(t, configmap1.Name, updatedConfigMap2.Name)
assert.Equal(t, configmap1.Namespace, updatedConfigMap2.Namespace)
assert.True(t, util.ConfigMapEquivalent(configmap1, updatedConfigMap2))
// Test add cluster
clusterWatch.Add(cluster2)
createdConfigMap2 := GetConfigMapFromChan(cluster2CreateChan)
assert.NotNil(t, createdConfigMap2)
assert.Equal(t, configmap1.Name, createdConfigMap2.Name)
assert.Equal(t, configmap1.Namespace, createdConfigMap2.Namespace)
assert.True(t, util.ConfigMapEquivalent(configmap1, createdConfigMap2))
close(stop)
}
func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
configmap := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configmap
}


@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["daemonset_controller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["daemonset_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)


@@ -0,0 +1,472 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemonset
import (
"fmt"
"reflect"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
const (
allClustersKey = "ALL_CLUSTERS"
)
type DaemonSetController struct {
// For triggering single daemonset reconciliation. This is used when there is an
// add/update/delete operation on a daemonset in either federated API server or
// in some member of the federation.
daemonsetDeliverer *util.DelayingDeliverer
// For triggering all daemonsets reconciliation. This is used when
// a new cluster becomes available.
clusterDeliverer *util.DelayingDeliverer
// Contains daemonsets present in members of federation.
daemonsetFederatedInformer util.FederatedInformer
// For updating members of federation.
federatedUpdater util.FederatedUpdater
// Definitions of daemonsets that should be federated.
daemonsetInformerStore cache.Store
// Informer controller for daemonsets that should be federated.
daemonsetInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federationclientset.Interface
// Backoff manager for daemonsets
daemonsetBackoff *flowcontrol.Backoff
// For events
eventRecorder record.EventRecorder
deletionHelper *deletionhelper.DeletionHelper
daemonsetReviewDelay time.Duration
clusterAvailableDelay time.Duration
smallDelay time.Duration
updateTimeout time.Duration
}
// NewDaemonSetController returns a new daemonset controller
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-daemonset-controller"})
daemonsetcontroller := &DaemonSetController{
federatedApiClient: client,
daemonsetReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
daemonsetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
daemonsetcontroller.daemonsetDeliverer = util.NewDelayingDeliverer()
daemonsetcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer on federated API servers on daemonsets that should be federated.
daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
// Federated informer on daemonsets in members of federation.
daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some daemonset operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the daemonsets again.
daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
if err != nil {
glog.Errorf("Error creating daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully created deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
if err != nil {
glog.Errorf("Error updating daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully updating deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &apiv1.DeleteOptions{})
if err != nil {
glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully deleting deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
})
daemonsetcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
daemonsetcontroller.hasFinalizerFunc,
daemonsetcontroller.removeFinalizerFunc,
daemonsetcontroller.addFinalizerFunc,
// objNameFunc
func(obj pkgruntime.Object) string {
daemonset := obj.(*extensionsv1.DaemonSet)
return daemonset.Name
},
daemonsetcontroller.updateTimeout,
daemonsetcontroller.eventRecorder,
daemonsetcontroller.daemonsetFederatedInformer,
daemonsetcontroller.federatedUpdater,
)
return daemonsetcontroller
}
// Returns true if the given object has the given finalizer in its ObjectMeta.
func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
daemonset := obj.(*extensionsv1.DaemonSet)
for i := range daemonset.ObjectMeta.Finalizers {
if string(daemonset.ObjectMeta.Finalizers[i]) == finalizer {
return true
}
}
return false
}
// Removes the finalizer from the given object's ObjectMeta.
// Assumes that the given object is a daemonset.
func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet)
newFinalizers := []string{}
hasFinalizer := false
for i := range daemonset.ObjectMeta.Finalizers {
if string(daemonset.ObjectMeta.Finalizers[i]) != finalizer {
newFinalizers = append(newFinalizers, daemonset.ObjectMeta.Finalizers[i])
} else {
hasFinalizer = true
}
}
if !hasFinalizer {
// Nothing to do.
return obj, nil
}
daemonset.ObjectMeta.Finalizers = newFinalizers
daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
if err != nil {
return nil, fmt.Errorf("failed to remove finalizer %s from daemonset %s: %v", finalizer, daemonset.Name, err)
}
return daemonset, nil
}
// Adds the given finalizer to the given object's ObjectMeta.
// Assumes that the given object is a daemonset.
func (daemonsetcontroller *DaemonSetController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet)
daemonset.ObjectMeta.Finalizers = append(daemonset.ObjectMeta.Finalizers, finalizer)
daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
if err != nil {
return nil, fmt.Errorf("failed to add finalizer %s to daemonset %s: %v", finalizer, daemonset.Name, err)
}
return daemonset, nil
}
func (daemonsetcontroller *DaemonSetController) Run(stopChan <-chan struct{}) {
glog.V(1).Infof("Starting daemonset controllr")
go daemonsetcontroller.daemonsetInformerController.Run(stopChan)
glog.V(1).Infof("Starting daemonset federated informer")
daemonsetcontroller.daemonsetFederatedInformer.Start()
go func() {
<-stopChan
daemonsetcontroller.daemonsetFederatedInformer.Stop()
}()
glog.V(1).Infof("Starting daemonset deliverers")
daemonsetcontroller.daemonsetDeliverer.StartWithHandler(func(item *util.DelayingDelivererItem) {
daemonset := item.Value.(*types.NamespacedName)
glog.V(4).Infof("Trigerring reconciliation of daemonset %s", daemonset.String())
daemonsetcontroller.reconcileDaemonSet(daemonset.Namespace, daemonset.Name)
})
daemonsetcontroller.clusterDeliverer.StartWithHandler(func(_ *util.DelayingDelivererItem) {
glog.V(4).Infof("Triggering reconciliation of all daemonsets")
daemonsetcontroller.reconcileDaemonSetsOnClusterChange()
})
util.StartBackoffGC(daemonsetcontroller.daemonsetBackoff, stopChan)
}
func getDaemonSetKey(namespace, name string) string {
return types.NamespacedName{
Namespace: namespace,
Name: name,
}.String()
}
func (daemonsetcontroller *DaemonSetController) deliverDaemonSetObj(obj interface{}, delay time.Duration, failed bool) {
daemonset := obj.(*extensionsv1.DaemonSet)
daemonsetcontroller.deliverDaemonSet(daemonset.Namespace, daemonset.Name, delay, failed)
}
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (daemonsetcontroller *DaemonSetController) deliverDaemonSet(namespace string, name string, delay time.Duration, failed bool) {
key := getDaemonSetKey(namespace, name)
if failed {
daemonsetcontroller.daemonsetBackoff.Next(key, time.Now())
delay = delay + daemonsetcontroller.daemonsetBackoff.Get(key)
} else {
daemonsetcontroller.daemonsetBackoff.Reset(key)
}
daemonsetcontroller.daemonsetDeliverer.DeliverAfter(key,
&types.NamespacedName{Namespace: namespace, Name: name}, delay)
}
// Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet
// synced with the corresponding api server.
func (daemonsetcontroller *DaemonSetController) isSynced() bool {
if !daemonsetcontroller.daemonsetFederatedInformer.ClustersSynced() {
glog.V(2).Infof("Cluster list not synced")
return false
}
clusters, err := daemonsetcontroller.daemonsetFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get ready clusters: %v", err)
return false
}
if !daemonsetcontroller.daemonsetFederatedInformer.GetTargetStore().ClustersSynced(clusters) {
return false
}
return true
}
// The function triggers reconciliation of all federated daemonsets.
func (daemonsetcontroller *DaemonSetController) reconcileDaemonSetsOnClusterChange() {
if !daemonsetcontroller.isSynced() {
daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
}
for _, obj := range daemonsetcontroller.daemonsetInformerStore.List() {
daemonset := obj.(*extensionsv1.DaemonSet)
daemonsetcontroller.deliverDaemonSet(daemonset.Namespace, daemonset.Name, daemonsetcontroller.smallDelay, false)
}
}
func (daemonsetcontroller *DaemonSetController) reconcileDaemonSet(namespace string, daemonsetName string) {
glog.V(4).Infof("Reconciling daemonset %s/%s", namespace, daemonsetName)
if !daemonsetcontroller.isSynced() {
glog.V(4).Infof("Daemonset controller is not synced")
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
return
}
key := getDaemonSetKey(namespace, daemonsetName)
baseDaemonSetObjFromStore, exist, err := daemonsetcontroller.daemonsetInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main daemonset store for %v: %v", key, err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
return
}
if !exist {
glog.V(4).Infof("Skipping daemonset %s/%s - not federated", namespace, daemonsetName)
// Not federated daemonset, ignoring.
return
}
baseDaemonSetObj, err := conversion.NewCloner().DeepCopy(baseDaemonSetObjFromStore)
baseDaemonSet, ok := baseDaemonSetObj.(*extensionsv1.DaemonSet)
if err != nil || !ok {
glog.Errorf("Error in retrieving obj %s from store: %v, %v", daemonsetName, ok, err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
return
}
if baseDaemonSet.DeletionTimestamp != nil {
if err := daemonsetcontroller.delete(baseDaemonSet); err != nil {
glog.Errorf("Failed to delete %s: %v", daemonsetName, err)
daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "DeleteFailed",
"DaemonSet delete failed: %v", err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
}
return
}
glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for daemonset: %s",
baseDaemonSet.Name)
// Add the required finalizers before creating a daemonset in underlying clusters.
updatedDaemonSetObj, err := daemonsetcontroller.deletionHelper.EnsureFinalizers(baseDaemonSet)
if err != nil {
glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in daemonset %s: %v",
baseDaemonSet.Name, err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, false)
return
}
baseDaemonSet = updatedDaemonSetObj.(*extensionsv1.DaemonSet)
glog.V(3).Infof("Syncing daemonset %s in underlying clusters", baseDaemonSet.Name)
clusters, err := daemonsetcontroller.daemonsetFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterDaemonSetObj, found, err := daemonsetcontroller.daemonsetFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
return
}
// Do not modify the object retrieved from the store; build the desired state from deep copies instead.
desiredDaemonSet := &extensionsv1.DaemonSet{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseDaemonSet.ObjectMeta),
Spec: util.DeepCopyApiTypeOrPanic(baseDaemonSet.Spec).(extensionsv1.DaemonSetSpec),
}
if !found {
glog.V(4).Infof("Creating daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)
daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "CreateInCluster",
"Creating daemonset in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredDaemonSet,
ClusterName: cluster.Name,
})
} else {
clusterDaemonSet := clusterDaemonSetObj.(*extensionsv1.DaemonSet)
// Update existing daemonset, if needed.
if !util.ObjectMetaEquivalent(desiredDaemonSet.ObjectMeta, clusterDaemonSet.ObjectMeta) ||
!reflect.DeepEqual(desiredDaemonSet.Spec, clusterDaemonSet.Spec) {
glog.V(4).Infof("Upadting daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)
daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInCluster",
"Updating daemonset in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredDaemonSet,
ClusterName: cluster.Name,
})
}
}
}
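// At this point the loop above has produced at most one operation per ready cluster:
// a missing object yields OperationTypeAdd, an object whose ObjectMeta or Spec differs
// from the desired state yields OperationTypeUpdate, and an equivalent object yields
// nothing. The operations are dispatched below through federatedUpdater, and any failure
// re-delivers the daemonset with backoff.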
if len(operations) == 0 {
glog.V(4).Infof("No operation needed for %s/%s", namespace, daemonsetName)
// Everything is in order
return
}
err = daemonsetcontroller.federatedUpdater.UpdateWithOnError(operations, daemonsetcontroller.updateTimeout,
func(op util.FederatedOperation, operror error) {
daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInClusterFailed",
"DaemonSet update in cluster %s failed: %v", op.ClusterName, operror)
})
if err != nil {
glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
return
}
}
// delete deletes the given daemonset or returns error if the deletion was not complete.
func (daemonsetcontroller *DaemonSetController) delete(daemonset *extensionsv1.DaemonSet) error {
glog.V(3).Infof("Handling deletion of daemonset: %v", *daemonset)
_, err := daemonsetcontroller.deletionHelper.HandleObjectInUnderlyingClusters(daemonset)
if err != nil {
return err
}
err = daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, nil)
if err != nil {
// It's all good if the error is a not-found error: that means the daemonset is already deleted and we do not have to do anything.
// This is expected when we are processing an update as a result of daemonset finalizer deletion.
// The process that deleted the last finalizer is also going to delete the daemonset and we do not have to do anything.
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to delete daemonset: %v", err)
}
}
return nil
}

View file

@ -0,0 +1,158 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemonset
import (
"fmt"
"reflect"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestDaemonSetController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("daemonsets", &fakeClient.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
daemonsetWatch := RegisterFakeWatch("daemonsets", &fakeClient.Fake)
daemonsetUpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &fakeClient.Fake, daemonsetWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("daemonsets", &cluster1Client.Fake)
RegisterFakeList("daemonsets", &cluster1Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("daemonsets", &cluster2Client.Fake)
RegisterFakeList("daemonsets", &cluster2Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster2Client.Fake, cluster2Watch)
daemonsetController := NewDaemonSetController(fakeClient)
informer := ToFederatedInformerForTestOnly(daemonsetController.daemonsetFederatedInformer)
informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
})
daemonsetController.clusterAvailableDelay = time.Second
daemonsetController.daemonsetReviewDelay = 50 * time.Millisecond
daemonsetController.smallDelay = 20 * time.Millisecond
daemonsetController.updateTimeout = 5 * time.Second
stop := make(chan struct{})
daemonsetController.Run(stop)
daemonset1 := extensionsv1.DaemonSet{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-daemonset",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/daemonsets/test-daemonset",
},
Spec: extensionsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: make(map[string]string),
},
},
}
// Test add federated daemonset.
daemonsetWatch.Add(&daemonset1)
// There should be 2 updates to add both the finalizers.
updatedDaemonSet := GetDaemonSetFromChan(daemonsetUpdateChan)
assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedDaemonSet = GetDaemonSetFromChan(daemonsetUpdateChan)
assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, apiv1.FinalizerOrphan))
daemonset1 = *updatedDaemonSet
createdDaemonSet := GetDaemonSetFromChan(cluster1CreateChan)
assert.NotNil(t, createdDaemonSet)
assert.Equal(t, daemonset1.Namespace, createdDaemonSet.Namespace)
assert.Equal(t, daemonset1.Name, createdDaemonSet.Name)
assert.True(t, daemonsetsEqual(daemonset1, *createdDaemonSet),
fmt.Sprintf("expected: %v, actual: %v", daemonset1, *createdDaemonSet))
// Wait for the daemonset to appear in the informer store
err := WaitForStoreUpdate(
daemonsetController.daemonsetFederatedInformer.GetTargetStore(),
cluster1.Name, getDaemonSetKey(daemonset1.Namespace, daemonset1.Name), wait.ForeverTestTimeout)
assert.Nil(t, err, "daemonset should have appeared in the informer store")
// TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540.
// Test update federated daemonset.
daemonset1.Annotations = map[string]string{
"A": "B",
}
daemonsetWatch.Modify(&daemonset1)
updatedDaemonSet = GetDaemonSetFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedDaemonSet)
assert.Equal(t, daemonset1.Name, updatedDaemonSet.Name)
assert.Equal(t, daemonset1.Namespace, updatedDaemonSet.Namespace)
assert.True(t, daemonsetsEqual(daemonset1, *updatedDaemonSet),
fmt.Sprintf("expected: %v, actual: %v", daemonset1, *updatedDaemonSet))
// Test update federated daemonset.
daemonset1.Spec.Template.Name = "TEST"
daemonsetWatch.Modify(&daemonset1)
err = CheckObjectFromChan(cluster1UpdateChan, MetaAndSpecCheckingFunction(&daemonset1))
assert.NoError(t, err)
// Test add cluster
clusterWatch.Add(cluster2)
createdDaemonSet2 := GetDaemonSetFromChan(cluster2CreateChan)
assert.NotNil(t, createdDaemonSet2)
assert.Equal(t, daemonset1.Name, createdDaemonSet2.Name)
assert.Equal(t, daemonset1.Namespace, createdDaemonSet2.Namespace)
assert.True(t, daemonsetsEqual(daemonset1, *createdDaemonSet2),
fmt.Sprintf("expected: %v, actual: %v", daemonset1, *createdDaemonSet2))
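// Adding cluster2 should cause the existing federated daemonset to be created in the new
// cluster as well; the assertions above check that the propagated copy matches the federated object.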
close(stop)
}
func daemonsetsEqual(a, b extensionsv1.DaemonSet) bool {
return util.ObjectMetaEquivalent(a.ObjectMeta, b.ObjectMeta) && reflect.DeepEqual(a.Spec, b.Spec)
}
func GetDaemonSetFromChan(c chan runtime.Object) *extensionsv1.DaemonSet {
daemonset := GetObjectFromChan(c).(*extensionsv1.DaemonSet)
return daemonset
}

View file

@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["deploymentcontroller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation:go_default_library",
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//federation/pkg/federation-controller/util/planner:go_default_library",
"//federation/pkg/federation-controller/util/podanalyzer:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["deploymentcontroller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,186 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"flag"
"fmt"
"testing"
"time"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/meta"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestParseFederationDeploymentPreference(t *testing.T) {
successPrefs := []string{
`{"rebalance": true,
"clusters": {
"k8s-1": {"minReplicas": 10, "maxReplicas": 20, "weight": 2},
"*": {"weight": 1}
}}`,
}
failedPrefs := []string{
`{`, // bad json
}
rs := newDeploymentWithReplicas("d-1", 100)
accessor, _ := meta.Accessor(rs)
anno := accessor.GetAnnotations()
if anno == nil {
anno = make(map[string]string)
accessor.SetAnnotations(anno)
}
for _, prefString := range successPrefs {
anno[FedDeploymentPreferencesAnnotation] = prefString
pref, err := parseFederationDeploymentPreference(rs)
assert.NotNil(t, pref)
assert.Nil(t, err)
}
for _, prefString := range failedPrefs {
anno[FedDeploymentPreferencesAnnotation] = prefString
pref, err := parseFederationDeploymentPreference(rs)
assert.Nil(t, pref)
assert.NotNil(t, err)
}
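// Illustrative usage (editor's sketch, not part of the original test): custom placement is
// requested by putting the preferences JSON on the federated Deployment before it is
// created, after which the controller decodes it via parseFederationDeploymentPreference:
//   dep := newDeploymentWithReplicas("web", 30)
//   dep.Annotations = map[string]string{
//       FedDeploymentPreferencesAnnotation: `{"rebalance": true,
//         "clusters": {"k8s-1": {"weight": 2}, "*": {"weight": 1}}}`,
//   }
//   pref, err := parseFederationDeploymentPreference(dep)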
}
func TestDeploymentController(t *testing.T) {
flag.Set("logtostderr", "true")
flag.Set("v", "5")
flag.Parse()
deploymentReviewDelay = 500 * time.Millisecond
clusterAvailableDelay = 100 * time.Millisecond
clusterUnavailableDelay = 100 * time.Millisecond
allDeploymentReviewDelay = 500 * time.Millisecond
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &fedv1.ClusterList{Items: []fedv1.Cluster{*cluster1}})
deploymentsWatch := RegisterFakeWatch("deployments", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("deployments", &cluster1Client.Fake)
_ = RegisterFakeWatch("pods", &cluster1Client.Fake)
RegisterFakeList("deployments", &cluster1Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("deployments", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("deployments", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("deployments", &cluster2Client.Fake)
_ = RegisterFakeWatch("pods", &cluster2Client.Fake)
RegisterFakeList("deployments", &cluster2Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("deployments", &cluster2Client.Fake, cluster2Watch)
deploymentController := NewDeploymentController(fakeClient)
clientFactory := func(cluster *fedv1.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
}
ToFederatedInformerForTestOnly(deploymentController.fedDeploymentInformer).SetClientFactory(clientFactory)
ToFederatedInformerForTestOnly(deploymentController.fedPodInformer).SetClientFactory(clientFactory)
stop := make(chan struct{})
go deploymentController.Run(5, stop)
// Create deployment. Expect to see it in cluster1.
dep1 := newDeploymentWithReplicas("depA", 6)
deploymentsWatch.Add(dep1)
checkDeployment := func(base *extensionsv1.Deployment, replicas int32) CheckingFunction {
return func(obj runtime.Object) error {
if obj == nil {
return fmt.Errorf("Observed object is nil")
}
d := obj.(*extensionsv1.Deployment)
if err := CompareObjectMeta(base.ObjectMeta, d.ObjectMeta); err != nil {
return err
}
if replicas != *d.Spec.Replicas {
return fmt.Errorf("Replica count is different expected:%d observed:%d", replicas, *d.Spec.Replicas)
}
return nil
}
}
assert.NoError(t, CheckObjectFromChan(cluster1CreateChan, checkDeployment(dep1, *dep1.Spec.Replicas)))
err := WaitForStoreUpdate(
deploymentController.fedDeploymentInformer.GetTargetStore(),
cluster1.Name, types.NamespacedName{Namespace: dep1.Namespace, Name: dep1.Name}.String(), wait.ForeverTestTimeout)
assert.Nil(t, err, "deployment should have appeared in the informer store")
// Increase replica count. Expect to see the update in cluster1.
newRep := int32(8)
dep1.Spec.Replicas = &newRep
deploymentsWatch.Modify(dep1)
assert.NoError(t, CheckObjectFromChan(cluster1UpdateChan, checkDeployment(dep1, *dep1.Spec.Replicas)))
// Add new cluster. Although rebalance = false, no pods have been created yet so it should
// rebalance anyway.
clusterWatch.Add(cluster2)
assert.NoError(t, CheckObjectFromChan(cluster1UpdateChan, checkDeployment(dep1, *dep1.Spec.Replicas/2)))
assert.NoError(t, CheckObjectFromChan(cluster2CreateChan, checkDeployment(dep1, *dep1.Spec.Replicas/2)))
// Add new deployment with non-default replica placement preferences.
dep2 := newDeploymentWithReplicas("deployment2", 9)
dep2.Annotations = make(map[string]string)
dep2.Annotations[FedDeploymentPreferencesAnnotation] = `{"rebalance": true,
"clusters": {
"cluster1": {"weight": 2},
"cluster2": {"weight": 1}
}}`
deploymentsWatch.Add(dep2)
assert.NoError(t, CheckObjectFromChan(cluster1CreateChan, checkDeployment(dep2, 6)))
assert.NoError(t, CheckObjectFromChan(cluster2CreateChan, checkDeployment(dep2, 3)))
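// With 9 replicas and cluster weights 2:1, the weighted split gives 6 replicas to cluster1
// and 3 to cluster2, which is what the two assertions above verify.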
}
func GetDeploymentFromChan(c chan runtime.Object) *extensionsv1.Deployment {
deployment := GetObjectFromChan(c).(*extensionsv1.Deployment)
return deployment
}
func newDeploymentWithReplicas(name string, replicas int32) *extensionsv1.Deployment {
return &extensionsv1.Deployment{
ObjectMeta: apiv1.ObjectMeta{
Name: name,
Namespace: apiv1.NamespaceDefault,
SelfLink: "/api/v1/namespaces/default/deployments/name",
},
Spec: extensionsv1.DeploymentSpec{
Replicas: &replicas,
},
}
}

View file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package federation_controller contains code for controllers (like the cluster
// controller).
package federation_controller // import "k8s.io/kubernetes/federation/pkg/federation-controller"

View file

@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["ingress_controller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["ingress_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,294 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import (
"fmt"
"reflect"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/errors"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestIngressController(t *testing.T) {
fakeClusterList := federationapi.ClusterList{Items: []federationapi.Cluster{}}
fakeConfigMapList1 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
fakeConfigMapList2 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
cfg1 := NewConfigMap("foo")
cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.
t.Log("Creating fake infrastructure")
fedClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList)
RegisterFakeList("ingresses", &fedClient.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake)
fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch)
//fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch)
cluster1Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster1Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1)
cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake)
cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
// cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
cluster2Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2)
cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake)
cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch)
cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch)
clientFactoryFunc := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
}
ingressController := NewIngressController(fedClient)
ingressInformer := ToFederatedInformerForTestOnly(ingressController.ingressFederatedInformer)
ingressInformer.SetClientFactory(clientFactoryFunc)
configMapInformer := ToFederatedInformerForTestOnly(ingressController.configMapFederatedInformer)
configMapInformer.SetClientFactory(clientFactoryFunc)
ingressController.clusterAvailableDelay = time.Second
ingressController.ingressReviewDelay = 10 * time.Millisecond
ingressController.configMapReviewDelay = 10 * time.Millisecond
ingressController.smallDelay = 20 * time.Millisecond
ingressController.updateTimeout = 5 * time.Second
stop := make(chan struct{})
t.Log("Running Ingress Controller")
ingressController.Run(stop)
// TODO: Here we are creating the ingress with first cluster annotation.
// Add another test without that annotation when
// https://github.com/kubernetes/kubernetes/issues/36540 is fixed.
ing1 := extensionsv1beta1.Ingress{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-ingress",
Namespace: "mynamespace",
SelfLink: "/api/v1/namespaces/mynamespace/ingress/test-ingress",
Annotations: map[string]string{
firstClusterAnnotation: cluster1.Name,
},
},
Status: extensionsv1beta1.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: make([]apiv1.LoadBalancerIngress, 0, 0),
},
},
}
t.Log("Adding cluster 1")
clusterWatch.Add(cluster1)
t.Log("Adding Ingress UID ConfigMap to cluster 1")
cluster1ConfigMapWatch.Add(cfg1)
t.Log("Checking that UID annotation on Cluster 1 annotation was correctly updated")
cluster := GetClusterFromChan(fedClusterUpdateChan)
assert.NotNil(t, cluster)
assert.Equal(t, cluster.ObjectMeta.Annotations[uidAnnotationKey], cfg1.Data[uidKey])
// Test add federated ingress.
t.Log("Adding Federated Ingress")
fedIngressWatch.Add(&ing1)
/*
// TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540.
t.Logf("Checking that approproate finalizers are added")
// There should be 2 updates to add both the finalizers.
updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, apiv1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress))
ing1 = *updatedIngress
*/
t.Log("Checking that Ingress was correctly created in cluster 1")
createdIngress := GetIngressFromChan(t, cluster1IngressCreateChan)
assert.NotNil(t, createdIngress)
assert.True(t, reflect.DeepEqual(ing1.Spec, createdIngress.Spec), "Spec of created ingress is not equal")
assert.True(t, util.ObjectMetaEquivalent(ing1.ObjectMeta, createdIngress.ObjectMeta), "Metadata of created object is not equivalent")
// Wait for finalizers to appear in federation store.
// assert.NoError(t, WaitForFinalizersInFederationStore(ingressController, ingressController.ingressInformerStore,
// types.NamespacedName{Namespace: ing1.Namespace, Name: ing1.Name}.String()), "finalizers not found in federated ingress")
// Wait for the cluster ingress to appear in cluster store.
assert.NoError(t, WaitForIngressInClusterStore(ingressController.ingressFederatedInformer.GetTargetStore(), cluster1.Name,
types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String()),
"Created ingress not found in underlying cluster store")
/*
// TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540.
// Test that IP address gets transferred from cluster ingress to federated ingress.
t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress")
createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, apiv1.LoadBalancerIngress{IP: "1.2.3.4"})
cluster1IngressWatch.Modify(createdIngress)
// Wait for store to see the updated cluster ingress.
assert.NoError(t, WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(),
cluster1.Name, types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String(),
createdIngress.Status.LoadBalancer, 4*wait.ForeverTestTimeout))
updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
assert.NotNil(t, updatedIngress, "Cluster's ingress load balancer status was not correctly transferred to the federated ingress")
if updatedIngress != nil {
assert.True(t, reflect.DeepEqual(createdIngress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress), fmt.Sprintf("Ingress IP was not transferred from cluster ingress to federated ingress. %v is not equal to %v", createdIngress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress))
t.Logf("expected: %v, actual: %v", createdIngress, updatedIngress)
}
// Test update federated ingress.
if updatedIngress.ObjectMeta.Annotations == nil {
updatedIngress.ObjectMeta.Annotations = make(map[string]string)
}
updatedIngress.ObjectMeta.Annotations["A"] = "B"
t.Log("Modifying Federated Ingress")
fedIngressWatch.Modify(updatedIngress)
t.Log("Checking that Ingress was correctly updated in cluster 1")
updatedIngress2 := GetIngressFromChan(t, cluster1IngressUpdateChan)
assert.NotNil(t, updatedIngress2)
assert.True(t, reflect.DeepEqual(updatedIngress2.Spec, updatedIngress.Spec), "Spec of updated ingress is not equal")
assert.Equal(t, updatedIngress2.ObjectMeta.Annotations["A"], updatedIngress.ObjectMeta.Annotations["A"], "Updated annotation not transferred from federated to cluster ingress.")
*/
// Test add cluster
t.Log("Adding a second cluster")
ing1.Annotations[staticIPNameKeyWritable] = "foo" // Make sure that the base object has a static IP name first.
fedIngressWatch.Modify(&ing1)
clusterWatch.Add(cluster2)
// First check that the original values are not equal - see above comment
assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], fmt.Sprintf("ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test"))
cluster2ConfigMapWatch.Add(cfg2)
t.Log("Checking that the ingress got created in cluster 2")
createdIngress2 := GetIngressFromChan(t, cluster2IngressCreateChan)
assert.NotNil(t, createdIngress2)
assert.True(t, reflect.DeepEqual(ing1.Spec, createdIngress2.Spec), "Spec of created ingress is not equal")
assert.True(t, util.ObjectMetaEquivalent(ing1.ObjectMeta, createdIngress2.ObjectMeta), "Metadata of created object is not equivalent")
t.Log("Checking that the configmap in cluster 2 got updated.")
updatedConfigMap2 := GetConfigMapFromChan(cluster2ConfigMapUpdateChan)
assert.NotNil(t, updatedConfigMap2, fmt.Sprintf("ConfigMap in cluster 2 was not updated (or more likely the test is broken and the API type written is wrong)"))
if updatedConfigMap2 != nil {
assert.Equal(t, cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey],
fmt.Sprintf("UID's in configmaps in cluster's 1 and 2 are not equal (%q != %q)", cfg1.Data["uid"], updatedConfigMap2.Data["uid"]))
}
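// At this point both clusters should agree on a single ingress UID: the controller read the
// UID from cluster1's ConfigMap, recorded it in the cluster annotation, and propagated it to
// cluster2's ConfigMap, which is what the comparison above checks.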
close(stop)
}
func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensionsv1beta1.Ingress {
obj := GetObjectFromChan(c)
ingress, ok := obj.(*extensionsv1beta1.Ingress)
if !ok {
t.Logf("Object on channel was not of type *extensionsv1beta1.Ingress: %v", obj)
}
return ingress
}
func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
configMap, _ := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configMap
}
func GetClusterFromChan(c chan runtime.Object) *federationapi.Cluster {
cluster, _ := GetObjectFromChan(c).(*federationapi.Cluster)
return cluster
}
func NewConfigMap(uid string) *apiv1.ConfigMap {
return &apiv1.ConfigMap{
ObjectMeta: apiv1.ObjectMeta{
Name: uidConfigMapName,
Namespace: uidConfigMapNamespace,
SelfLink: "/api/v1/namespaces/" + uidConfigMapNamespace + "/configmap/" + uidConfigMapName,
// TODO: Remove: Annotations: map[string]string{},
},
Data: map[string]string{
uidKey: uid,
},
}
}
// Wait for finalizers to appear in federation store.
func WaitForFinalizersInFederationStore(ingressController *IngressController, store cache.Store, key string) error {
retryInterval := 100 * time.Millisecond
timeout := wait.ForeverTestTimeout
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(key)
if !found || err != nil {
return false, err
}
ingress := obj.(*extensionsv1beta1.Ingress)
if ingressController.hasFinalizerFunc(ingress, apiv1.FinalizerOrphan) &&
ingressController.hasFinalizerFunc(ingress, deletionhelper.FinalizerDeleteFromUnderlyingClusters) {
return true, nil
}
return false, nil
})
return err
}
// Wait for the cluster ingress to appear in cluster store.
func WaitForIngressInClusterStore(store util.FederatedReadOnlyStore, clusterName, key string) error {
retryInterval := 100 * time.Millisecond
timeout := wait.ForeverTestTimeout
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
_, found, err := store.GetByKey(clusterName, key)
if found && err == nil {
return true, nil
}
if errors.IsNotFound(err) {
return false, nil
}
return false, err
})
return err
}
// Wait for ingress status to be updated to match the desiredStatus.
func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus apiv1.LoadBalancerStatus, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil {
return false, err
}
ingress := obj.(*extensionsv1beta1.Ingress)
return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil
})
return err
}

View file

@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["namespace_controller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["namespace_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,206 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"fmt"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestNamespaceController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
ns1 := apiv1.Namespace{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-namespace",
SelfLink: "/api/v1/namespaces/test-namespace",
},
Spec: apiv1.NamespaceSpec{
Finalizers: []apiv1.FinalizerName{apiv1.FinalizerKubernetes},
},
}
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("namespaces", &fakeClient.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
namespaceWatch := RegisterFakeWatch("namespaces", &fakeClient.Fake)
namespaceCreateChan := RegisterFakeCopyOnCreate("namespaces", &fakeClient.Fake, namespaceWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("namespaces", &cluster1Client.Fake)
RegisterFakeList("namespaces", &cluster1Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("namespaces", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("namespaces", &cluster2Client.Fake)
RegisterFakeList("namespaces", &cluster2Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster2Client.Fake, cluster2Watch)
RegisterFakeList("replicasets", &fakeClient.Fake, &extensionsv1.ReplicaSetList{Items: []extensionsv1.ReplicaSet{
{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-rs",
Namespace: ns1.Namespace,
}}}})
RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{
{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret",
Namespace: ns1.Namespace,
}}}})
RegisterFakeList("services", &fakeClient.Fake, &apiv1.ServiceList{Items: []apiv1.Service{
{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-service",
Namespace: ns1.Namespace,
}}}})
nsDeleteChan := RegisterDelete(&fakeClient.Fake, "namespaces")
rsDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "replicasets")
serviceDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "services")
secretDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "secrets")
namespaceController := NewNamespaceController(fakeClient)
informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
}
setClientFactory(namespaceController.namespaceFederatedInformer, informerClientFactory)
namespaceController.clusterAvailableDelay = time.Second
namespaceController.namespaceReviewDelay = 50 * time.Millisecond
namespaceController.smallDelay = 20 * time.Millisecond
namespaceController.updateTimeout = 5 * time.Second
stop := make(chan struct{})
namespaceController.Run(stop)
// Test add federated namespace.
namespaceWatch.Add(&ns1)
// Verify that the DeleteFromUnderlyingClusters finalizer is added to the namespace.
// Note: finalize invokes the create action in Fake client.
// TODO: Seems like a bug. Should invoke update. Fix it.
updatedNamespace := GetNamespaceFromChan(namespaceCreateChan)
assert.True(t, namespaceController.hasFinalizerFunc(updatedNamespace, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
ns1 = *updatedNamespace
// Verify that the namespace is created in underlying cluster1.
createdNamespace := GetNamespaceFromChan(cluster1CreateChan)
assert.NotNil(t, createdNamespace)
assert.Equal(t, ns1.Name, createdNamespace.Name)
// Wait for the namespace to appear in the informer store
err := WaitForStoreUpdate(
namespaceController.namespaceFederatedInformer.GetTargetStore(),
cluster1.Name, ns1.Name, wait.ForeverTestTimeout)
assert.Nil(t, err, "namespace should have appeared in the informer store")
/*
// TODO: Uncomment this once we have figured out why this is flaky.
// Test update federated namespace.
ns1.Annotations = map[string]string{
"A": "B",
}
namespaceWatch.Modify(&ns1)
updatedNamespace = GetNamespaceFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedNamespace)
assert.Equal(t, ns1.Name, updatedNamespace.Name)
// assert.Contains(t, updatedNamespace.Annotations, "A")
*/
// Test add cluster
clusterWatch.Add(cluster2)
createdNamespace2 := GetNamespaceFromChan(cluster2CreateChan)
assert.NotNil(t, createdNamespace2)
assert.Equal(t, ns1.Name, createdNamespace2.Name)
// assert.Contains(t, createdNamespace2.Annotations, "A")
// Delete the namespace with orphan finalizer (let namespaces
// in underlying clusters be as is).
// TODO: Add a test without orphan finalizer.
ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, apiv1.FinalizerOrphan)
ns1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
namespaceWatch.Modify(&ns1)
assert.Equal(t, ns1.Name, GetStringFromChan(nsDeleteChan))
assert.Equal(t, "all", GetStringFromChan(rsDeleteChan))
assert.Equal(t, "all", GetStringFromChan(serviceDeleteChan))
assert.Equal(t, "all", GetStringFromChan(secretDeleteChan))
close(stop)
}
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory)
}
func RegisterDeleteCollection(client *core.Fake, resource string) chan string {
deleteChan := make(chan string, 100)
client.AddReactor("delete-collection", resource, func(action core.Action) (bool, runtime.Object, error) {
deleteChan <- "all"
return true, nil, nil
})
return deleteChan
}
func RegisterDelete(client *core.Fake, resource string) chan string {
deleteChan := make(chan string, 100)
client.AddReactor("delete", resource, func(action core.Action) (bool, runtime.Object, error) {
deleteAction := action.(core.DeleteAction)
deleteChan <- deleteAction.GetName()
return true, nil, nil
})
return deleteChan
}
func GetStringFromChan(c chan string) string {
select {
case str := <-c:
return str
case <-time.After(5 * time.Second):
return "timedout"
}
}
func GetNamespaceFromChan(c chan runtime.Object) *apiv1.Namespace {
namespace := GetObjectFromChan(c).(*apiv1.Namespace)
return namespace
}

View file

@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["replicasetcontroller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation:go_default_library",
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//federation/pkg/federation-controller/util/planner:go_default_library",
"//federation/pkg/federation-controller/util/podanalyzer:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["replicasetcontroller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/meta:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,193 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicaset
import (
"flag"
"fmt"
"testing"
"time"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientfake "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/meta"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
kubeclientfake "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/watch"
"github.com/stretchr/testify/assert"
)
func TestParseFederationReplicaSetReference(t *testing.T) {
successPrefs := []string{
`{"rebalance": true,
"clusters": {
"k8s-1": {"minReplicas": 10, "maxReplicas": 20, "weight": 2},
"*": {"weight": 1}
}}`,
}
failedPrefs := []string{
`{`, // bad json
}
rs := newReplicaSetWithReplicas("rs-1", 100)
accessor, _ := meta.Accessor(rs)
anno := accessor.GetAnnotations()
if anno == nil {
anno = make(map[string]string)
accessor.SetAnnotations(anno)
}
for _, prefString := range successPrefs {
anno[FedReplicaSetPreferencesAnnotation] = prefString
pref, err := parseFederationReplicaSetReference(rs)
assert.NotNil(t, pref)
assert.Nil(t, err)
}
for _, prefString := range failedPrefs {
anno[FedReplicaSetPreferencesAnnotation] = prefString
pref, err := parseFederationReplicaSetReference(rs)
assert.Nil(t, pref)
assert.NotNil(t, err)
}
}
func TestReplicaSetController(t *testing.T) {
flag.Set("logtostderr", "true")
flag.Set("v", "5")
flag.Parse()
replicaSetReviewDelay = 10 * time.Millisecond
clusterAvailableDelay = 20 * time.Millisecond
clusterUnavailableDelay = 60 * time.Millisecond
allReplicaSetReviewDelay = 120 * time.Millisecond
fedclientset := fedclientfake.NewSimpleClientset()
fedrswatch := watch.NewFake()
fedclientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(fedrswatch, nil))
fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-1", apiv1.ConditionTrue))
fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-2", apiv1.ConditionTrue))
kube1clientset := kubeclientfake.NewSimpleClientset()
kube1rswatch := watch.NewFake()
kube1clientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(kube1rswatch, nil))
kube1Podwatch := watch.NewFake()
kube1clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(kube1Podwatch, nil))
kube2clientset := kubeclientfake.NewSimpleClientset()
kube2rswatch := watch.NewFake()
kube2clientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(kube2rswatch, nil))
kube2Podwatch := watch.NewFake()
kube2clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(kube2Podwatch, nil))
fedInformerClientFactory := func(cluster *fedv1.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case "k8s-1":
return kube1clientset, nil
case "k8s-2":
return kube2clientset, nil
default:
return nil, fmt.Errorf("Unknown cluster: %v", cluster.Name)
}
}
replicaSetController := NewReplicaSetController(fedclientset)
rsFedinformer := testutil.ToFederatedInformerForTestOnly(replicaSetController.fedReplicaSetInformer)
rsFedinformer.SetClientFactory(fedInformerClientFactory)
podFedinformer := testutil.ToFederatedInformerForTestOnly(replicaSetController.fedPodInformer)
podFedinformer.SetClientFactory(fedInformerClientFactory)
stopChan := make(chan struct{})
defer close(stopChan)
go replicaSetController.Run(1, stopChan)
rs := newReplicaSetWithReplicas("rs", 9)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Create(rs)
fedrswatch.Add(rs)
time.Sleep(1 * time.Second)
rs1, _ := kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
kube1rswatch.Add(rs1)
rs1.Status.Replicas = *rs1.Spec.Replicas
rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas
rs1.Status.ReadyReplicas = *rs1.Spec.Replicas
rs1.Status.AvailableReplicas = *rs1.Spec.Replicas
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
kube1rswatch.Modify(rs1)
rs2, _ := kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
kube2rswatch.Add(rs2)
rs2.Status.Replicas = *rs2.Spec.Replicas
rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas
rs2.Status.ReadyReplicas = *rs2.Spec.Replicas
rs2.Status.AvailableReplicas = *rs2.Spec.Replicas
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2)
kube2rswatch.Modify(rs2)
time.Sleep(1 * time.Second)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas)
assert.Equal(t, rs.Status.ReadyReplicas, rs1.Status.ReadyReplicas+rs2.Status.ReadyReplicas)
assert.Equal(t, rs.Status.AvailableReplicas, rs1.Status.AvailableReplicas+rs2.Status.AvailableReplicas)
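// The federated replicaset aggregates across clusters: its spec replicas and each status
// field equal the sum of the corresponding per-cluster values, as the assertions above verify.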
var replicas int32 = 20
rs.Spec.Replicas = &replicas
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Update(rs)
fedrswatch.Modify(rs)
time.Sleep(1 * time.Second)
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
rs1.Status.Replicas = *rs1.Spec.Replicas
rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas
rs1.Status.ReadyReplicas = *rs1.Spec.Replicas
rs1.Status.AvailableReplicas = *rs1.Spec.Replicas
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
kube1rswatch.Modify(rs1)
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
rs2.Status.Replicas = *rs2.Spec.Replicas
rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas
rs2.Status.ReadyReplicas = *rs2.Spec.Replicas
rs2.Status.AvailableReplicas = *rs2.Spec.Replicas
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2)
kube2rswatch.Modify(rs2)
time.Sleep(1 * time.Second)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas)
assert.Equal(t, rs.Status.ReadyReplicas, rs1.Status.ReadyReplicas+rs2.Status.ReadyReplicas)
assert.Equal(t, rs.Status.AvailableReplicas, rs1.Status.AvailableReplicas+rs2.Status.AvailableReplicas)
}
func newReplicaSetWithReplicas(name string, replicas int32) *extensionsv1.ReplicaSet {
return &extensionsv1.ReplicaSet{
ObjectMeta: apiv1.ObjectMeta{
Name: name,
Namespace: apiv1.NamespaceDefault,
SelfLink: "/api/v1/namespaces/default/replicasets/name",
},
Spec: extensionsv1.ReplicaSetSpec{
Replicas: &replicas,
},
}
}
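
The two blocks of sum assertions above are identical; a small helper could make the intent clearer. A minimal sketch (the helper name is hypothetical and not part of this change; it relies on the same testing, assert and extensionsv1 imports the test file already uses):

// assertFedStatusIsSum is a hypothetical test helper that checks that the
// federated ReplicaSet's spec and status equal the component-wise sums of the
// per-cluster ReplicaSets, mirroring the assertions in the test above.
func assertFedStatusIsSum(t *testing.T, fed *extensionsv1.ReplicaSet, locals ...*extensionsv1.ReplicaSet) {
	var spec, replicas, fullyLabeled, ready, available int32
	for _, rs := range locals {
		spec += *rs.Spec.Replicas
		replicas += rs.Status.Replicas
		fullyLabeled += rs.Status.FullyLabeledReplicas
		ready += rs.Status.ReadyReplicas
		available += rs.Status.AvailableReplicas
	}
	// The federated object should carry the component-wise sums.
	assert.Equal(t, *fed.Spec.Replicas, spec)
	assert.Equal(t, fed.Status.Replicas, replicas)
	assert.Equal(t, fed.Status.FullyLabeledReplicas, fullyLabeled)
	assert.Equal(t, fed.Status.ReadyReplicas, ready)
	assert.Equal(t, fed.Status.AvailableReplicas, available)
}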

View file

@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["secret_controller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = ["secret_controller_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

View file

@ -0,0 +1,436 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secret
import (
"fmt"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
const (
allClustersKey = "ALL_CLUSTERS"
)
type SecretController struct {
// For triggering single secret reconciliation. This is used when there is an
// add/update/delete operation on a secret in either federated API server or
// in some member of the federation.
secretDeliverer *util.DelayingDeliverer
// For triggering all secrets reconciliation. This is used when
// a new cluster becomes available.
clusterDeliverer *util.DelayingDeliverer
// Contains secrets present in members of federation.
secretFederatedInformer util.FederatedInformer
// For updating members of federation.
federatedUpdater util.FederatedUpdater
// Definitions of secrets that should be federated.
secretInformerStore cache.Store
// Informer controller for secrets that should be federated.
secretInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federationclientset.Interface
// Backoff manager for secrets
secretBackoff *flowcontrol.Backoff
// For events
eventRecorder record.EventRecorder
deletionHelper *deletionhelper.DeletionHelper
secretReviewDelay time.Duration
clusterAvailableDelay time.Duration
smallDelay time.Duration
updateTimeout time.Duration
}
// NewSecretController returns a new secret controller
func NewSecretController(client federationclientset.Interface) *SecretController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-secrets-controller"})
secretcontroller := &SecretController{
federatedApiClient: client,
secretReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
secretBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
secretcontroller.secretDeliverer = util.NewDelayingDeliverer()
secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on secrets that should be federated.
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().Secrets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().Secrets(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.Secret{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
// Federated informer on secrets in members of federation.
secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().Secrets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Secrets(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.Secret{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
// would be just confirmation that some secret operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the secrets again.
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Create(secret)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Update(secret)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &apiv1.DeleteOptions{})
return err
})
secretcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
secretcontroller.hasFinalizerFunc,
secretcontroller.removeFinalizerFunc,
secretcontroller.addFinalizerFunc,
// objNameFunc
func(obj pkgruntime.Object) string {
secret := obj.(*apiv1.Secret)
return secret.Name
},
secretcontroller.updateTimeout,
secretcontroller.eventRecorder,
secretcontroller.secretFederatedInformer,
secretcontroller.federatedUpdater,
)
return secretcontroller
}
// Returns true if the given object has the given finalizer in its ObjectMeta.
func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
secret := obj.(*apiv1.Secret)
for i := range secret.ObjectMeta.Finalizers {
if string(secret.ObjectMeta.Finalizers[i]) == finalizer {
return true
}
}
return false
}
// Removes the finalizer from the given object's ObjectMeta.
// Assumes that the given object is a secret.
func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*apiv1.Secret)
newFinalizers := []string{}
hasFinalizer := false
for i := range secret.ObjectMeta.Finalizers {
if string(secret.ObjectMeta.Finalizers[i]) != finalizer {
newFinalizers = append(newFinalizers, secret.ObjectMeta.Finalizers[i])
} else {
hasFinalizer = true
}
}
if !hasFinalizer {
// Nothing to do.
return obj, nil
}
secret.ObjectMeta.Finalizers = newFinalizers
secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
if err != nil {
return nil, fmt.Errorf("failed to remove finalizer %s from secret %s: %v", finalizer, secret.Name, err)
}
return secret, nil
}
// Adds the given finalizer to the given object's ObjectMeta.
// Assumes that the given object is a secret.
func (secretcontroller *SecretController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*apiv1.Secret)
secret.ObjectMeta.Finalizers = append(secret.ObjectMeta.Finalizers, finalizer)
secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
if err != nil {
return nil, fmt.Errorf("failed to add finalizer %s to secret %s: %v", finalizer, secret.Name, err)
}
return secret, nil
}
func (secretcontroller *SecretController) Run(stopChan <-chan struct{}) {
go secretcontroller.secretInformerController.Run(stopChan)
secretcontroller.secretFederatedInformer.Start()
go func() {
<-stopChan
secretcontroller.secretFederatedInformer.Stop()
}()
secretcontroller.secretDeliverer.StartWithHandler(func(item *util.DelayingDelivererItem) {
secret := item.Value.(*types.NamespacedName)
secretcontroller.reconcileSecret(*secret)
})
secretcontroller.clusterDeliverer.StartWithHandler(func(_ *util.DelayingDelivererItem) {
secretcontroller.reconcileSecretsOnClusterChange()
})
util.StartBackoffGC(secretcontroller.secretBackoff, stopChan)
}
func (secretcontroller *SecretController) deliverSecretObj(obj interface{}, delay time.Duration, failed bool) {
secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, delay, failed)
}
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (secretcontroller *SecretController) deliverSecret(secret types.NamespacedName, delay time.Duration, failed bool) {
key := secret.String()
if failed {
secretcontroller.secretBackoff.Next(key, time.Now())
delay = delay + secretcontroller.secretBackoff.Get(key)
} else {
secretcontroller.secretBackoff.Reset(key)
}
secretcontroller.secretDeliverer.DeliverAfter(key, &secret, delay)
}
// Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet
// synced with the corresponding api server.
func (secretcontroller *SecretController) isSynced() bool {
if !secretcontroller.secretFederatedInformer.ClustersSynced() {
glog.V(2).Infof("Cluster list not synced")
return false
}
clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get ready clusters: %v", err)
return false
}
if !secretcontroller.secretFederatedInformer.GetTargetStore().ClustersSynced(clusters) {
return false
}
return true
}
// The function triggers reconciliation of all federated secrets.
func (secretcontroller *SecretController) reconcileSecretsOnClusterChange() {
if !secretcontroller.isSynced() {
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
}
for _, obj := range secretcontroller.secretInformerStore.List() {
secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, secretcontroller.smallDelay, false)
}
}
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
if !secretcontroller.isSynced() {
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
key := secret.String()
baseSecretObjFromStore, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main secret store for %v: %v", key, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
if !exist {
// Not federated secret, ignoring.
return
}
// Create a copy before modifying the obj to prevent race condition with
// other readers of obj from store.
baseSecretObj, err := conversion.NewCloner().DeepCopy(baseSecretObjFromStore)
baseSecret, ok := baseSecretObj.(*apiv1.Secret)
if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
if baseSecret.DeletionTimestamp != nil {
if err := secretcontroller.delete(baseSecret); err != nil {
glog.Errorf("Failed to delete %s: %v", secret, err)
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "DeleteFailed",
"Secret delete failed: %v", err)
secretcontroller.deliverSecret(secret, 0, true)
}
return
}
glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for secret: %s",
baseSecret.Name)
// Add the required finalizers before creating a secret in underlying clusters.
updatedSecretObj, err := secretcontroller.deletionHelper.EnsureFinalizers(baseSecret)
if err != nil {
glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in secret %s: %v",
baseSecret.Name, err)
secretcontroller.deliverSecret(secret, 0, false)
return
}
baseSecret = updatedSecretObj.(*apiv1.Secret)
glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)
clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
// The data should not be modified.
desiredSecret := &apiv1.Secret{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
Data: baseSecret.Data,
Type: baseSecret.Type,
}
if !found {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
"Creating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
} else {
clusterSecret := clusterSecretObj.(*apiv1.Secret)
// Update existing secret, if needed.
if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
"Updating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
}
}
}
if len(operations) == 0 {
// Everything is in order
return
}
err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
func(op util.FederatedOperation, operror error) {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed",
"Secret update in cluster %s failed: %v", op.ClusterName, operror)
})
if err != nil {
glog.Errorf("Failed to execute updates for %s: %v", key, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
// Everything is in order but let's be doubly sure
secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}
// delete deletes the given secret or returns error if the deletion was not complete.
func (secretcontroller *SecretController) delete(secret *apiv1.Secret) error {
glog.V(3).Infof("Handling deletion of secret: %v", *secret)
_, err := secretcontroller.deletionHelper.HandleObjectInUnderlyingClusters(secret)
if err != nil {
return err
}
err = secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Delete(secret.Name, nil)
if err != nil {
// It's all good if the error is a not-found error. That means it is already deleted and we do not have to do anything.
// This is expected when we are processing an update as a result of secret finalizer deletion.
// The process that deleted the last finalizer is also going to delete the secret and we do not have to do anything.
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %v", err)
}
}
return nil
}
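
For orientation, a minimal sketch of how this controller might be wired into a controller-manager style binary. The function name and restConfig variable are hypothetical; it assumes a rest config pointing at the federation API server, the federation_release_1_5 clientset's standard NewForConfig constructor, and the restclient package import.

// startSecretController is a hypothetical wiring sketch, not part of this change.
func startSecretController(restConfig *restclient.Config, stopChan <-chan struct{}) error {
	// Build a clientset against the federation API server.
	client, err := federationclientset.NewForConfig(restConfig)
	if err != nil {
		return err
	}
	// NewSecretController sets up the informers, deliverers and updater;
	// Run starts them and returns, leaving the goroutines to watch stopChan.
	controller := NewSecretController(client)
	controller.Run(stopChan)
	return nil
}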

View file

@ -0,0 +1,196 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secret
import (
"fmt"
"reflect"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/stretchr/testify/assert"
)
func TestSecretController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
secretWatch := RegisterFakeWatch("secrets", &fakeClient.Fake)
secretUpdateChan := RegisterFakeCopyOnUpdate("secrets", &fakeClient.Fake, secretWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("secrets", &cluster1Client.Fake)
RegisterFakeList("secrets", &cluster1Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("secrets", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("secrets", &cluster2Client.Fake)
RegisterFakeList("secrets", &cluster2Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster2Client.Fake, cluster2Watch)
secretController := NewSecretController(fakeClient)
informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
case cluster2.Name:
return cluster2Client, nil
default:
return nil, fmt.Errorf("Unknown cluster")
}
}
setClientFactory(secretController.secretFederatedInformer, informerClientFactory)
secretController.clusterAvailableDelay = time.Second
secretController.secretReviewDelay = 50 * time.Millisecond
secretController.smallDelay = 20 * time.Millisecond
secretController.updateTimeout = 5 * time.Second
stop := make(chan struct{})
secretController.Run(stop)
secret1 := apiv1.Secret{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/secrets/test-secret",
},
Data: map[string][]byte{
"A": []byte("ala ma kota"),
"B": []byte("quick brown fox"),
},
Type: apiv1.SecretTypeOpaque,
}
// Test add federated secret.
secretWatch.Add(&secret1)
// There should be 2 updates to add both the finalizers.
updatedSecret := GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedSecret = GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, apiv1.FinalizerOrphan))
secret1 = *updatedSecret
// Verify that the secret is created in underlying cluster1.
createdSecret := GetSecretFromChan(cluster1CreateChan)
assert.NotNil(t, createdSecret)
assert.Equal(t, secret1.Namespace, createdSecret.Namespace)
assert.Equal(t, secret1.Name, createdSecret.Name)
assert.True(t, secretsEqual(secret1, *createdSecret),
fmt.Sprintf("expected: %v, actual: %v", secret1, *createdSecret))
// Wait for the secret to appear in the informer store
err := WaitForStoreUpdate(
secretController.secretFederatedInformer.GetTargetStore(),
cluster1.Name, types.NamespacedName{Namespace: secret1.Namespace, Name: secret1.Name}.String(), wait.ForeverTestTimeout)
assert.Nil(t, err, "secret should have appeared in the informer store")
/*
// TODO: Uncomment this once we have figured out why this is flaky.
// Test update federated secret.
secret1.Annotations = map[string]string{
"A": "B",
}
secretWatch.Modify(&secret1)
updatedSecret = GetSecretFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedSecret)
assert.Equal(t, secret1.Name, updatedSecret.Name)
assert.Equal(t, secret1.Namespace, updatedSecret.Namespace)
assert.True(t, secretsEqual(secret1, *updatedSecret),
fmt.Sprintf("expected: %v, actual: %v", secret1, *updatedSecret))
// Wait for the secret to be updated in the informer store.
err = WaitForSecretStoreUpdate(
secretController.secretFederatedInformer.GetTargetStore(),
cluster1.Name, types.NamespacedName{Namespace: secret1.Namespace, Name: secret1.Name}.String(),
updatedSecret, wait.ForeverTestTimeout)
assert.Nil(t, err, "secret should have been updated in the informer store")
// Test update federated secret.
secret1.Data = map[string][]byte{
"config": []byte("myconfigurationfile"),
}
secretWatch.Modify(&secret1)
updatedSecret2 := GetSecretFromChan(cluster1UpdateChan)
assert.NotNil(t, updatedSecret2)
assert.Equal(t, secret1.Name, updatedSecret2.Name)
assert.Equal(t, secret1.Namespace, updatedSecret.Namespace)
assert.True(t, secretsEqual(secret1, *updatedSecret2),
fmt.Sprintf("expected: %v, actual: %v", secret1, *updatedSecret2))
*/
// Test add cluster
clusterWatch.Add(cluster2)
createdSecret2 := GetSecretFromChan(cluster2CreateChan)
assert.NotNil(t, createdSecret2)
assert.Equal(t, secret1.Name, createdSecret2.Name)
assert.Equal(t, secret1.Namespace, createdSecret2.Namespace)
assert.True(t, secretsEqual(secret1, *createdSecret2),
fmt.Sprintf("expected: %v, actual: %v", secret1, *createdSecret2))
close(stop)
}
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory)
}
func secretsEqual(a, b apiv1.Secret) bool {
// Clear the SelfLink and ObjectMeta.Finalizers since they will be different
// in the resource in the federation control plane and the resource in the underlying cluster.
a.SelfLink = ""
b.SelfLink = ""
a.ObjectMeta.Finalizers = []string{}
b.ObjectMeta.Finalizers = []string{}
return reflect.DeepEqual(a, b)
}
func GetSecretFromChan(c chan runtime.Object) *apiv1.Secret {
secret := GetObjectFromChan(c).(*apiv1.Secret)
return secret
}
// Wait till the store is updated with the latest secret.
func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *apiv1.Secret, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil {
return false, err
}
equal := secretsEqual(*obj.(*apiv1.Secret), *desiredSecret)
return equal, err
})
return err
}

View file

@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"cluster_helper.go",
"dns.go",
"doc.go",
"endpoint_helper.go",
"service_helper.go",
"servicecontroller.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/cache:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/dnsprovider:go_default_library",
"//federation/pkg/dnsprovider/rrstype:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"dns_test.go",
"endpoint_helper_test.go",
"service_helper_test.go",
"servicecontroller_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/pkg/dnsprovider/providers/google/clouddns:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/util/sets:go_default_library",
],
)

View file

@ -0,0 +1,205 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"reflect"
"github.com/golang/glog"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
)
type clusterCache struct {
clientset *kubeclientset.Clientset
cluster *v1beta1.Cluster
// A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister
// Watches changes to all services
serviceController *cache.Controller
// A store of endpoints, populated by the endpointController
endpointStore cache.StoreToEndpointsLister
// Watches changes to all endpoints
endpointController *cache.Controller
// services that need to be synced
serviceQueue *workqueue.Type
// endpoints that need to be synced
endpointQueue *workqueue.Type
}
type clusterClientCache struct {
rwlock sync.Mutex // protects clientMap
clientMap map[string]*clusterCache
}
func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterName string) {
cachedClusterClient, ok := cc.clientMap[clusterName]
// only create when no existing cachedClusterClient
if ok {
if !reflect.DeepEqual(cachedClusterClient.cluster.Spec, cluster.Spec) {
// rebuild the clientset when the cluster spec has changed
clientset, err := newClusterClientset(cluster)
if err != nil || clientset == nil {
glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
}
glog.V(4).Infof("Cluster spec changed, rebuild clientset for cluster %s", clusterName)
cachedClusterClient.clientset = clientset
go cachedClusterClient.serviceController.Run(wait.NeverStop)
go cachedClusterClient.endpointController.Run(wait.NeverStop)
glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
} else {
// do nothing when there is no spec change
glog.V(4).Infof("Keep clientset for cluster %s", clusterName)
return
}
} else {
glog.V(4).Infof("No client cache for cluster %s, building new", clusterName)
clientset, err := newClusterClientset(cluster)
if err != nil || clientset == nil {
glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
}
cachedClusterClient = &clusterCache{
cluster: cluster,
clientset: clientset,
serviceQueue: workqueue.New(),
endpointQueue: workqueue.New(),
}
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).Watch(options)
},
},
&v1.Endpoints{},
serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueEndpoint(obj, clusterName)
},
UpdateFunc: func(old, cur interface{}) {
cc.enqueueEndpoint(cur, clusterName)
},
DeleteFunc: func(obj interface{}) {
cc.enqueueEndpoint(obj, clusterName)
},
},
)
cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&v1.Service{},
serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueService(obj, clusterName)
},
UpdateFunc: func(old, cur interface{}) {
oldService, ok := old.(*v1.Service)
if !ok {
return
}
curService, ok := cur.(*v1.Service)
if !ok {
return
}
if !reflect.DeepEqual(oldService.Status.LoadBalancer, curService.Status.LoadBalancer) {
cc.enqueueService(cur, clusterName)
}
},
DeleteFunc: func(obj interface{}) {
service, _ := obj.(*v1.Service)
cc.enqueueService(obj, clusterName)
glog.V(2).Infof("Service %s/%s deletion found and enque to service store %s", service.Namespace, service.Name, clusterName)
},
},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
cc.clientMap[clusterName] = cachedClusterClient
go cachedClusterClient.serviceController.Run(wait.NeverStop)
go cachedClusterClient.endpointController.Run(wait.NeverStop)
glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
}
}
// TODO: copied from the cluster controller; make this a common function in pass 2.
// delFromClusterSet removes the given cluster's entry, and with it the
// corresponding clientset, from clientMap.
func (cc *clusterClientCache) delFromClusterSet(obj interface{}) {
cluster, ok := obj.(*v1beta1.Cluster)
cc.rwlock.Lock()
defer cc.rwlock.Unlock()
if ok {
delete(cc.clientMap, cluster.Name)
} else {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Infof("Object contained wasn't a cluster or a deleted key: %+v", obj)
return
}
glog.Infof("Found tombstone for %v", obj)
delete(cc.clientMap, tombstone.Key)
}
}
// addToClientMap inserts the new cluster into clientMap and creates a corresponding
// clientset, plus service and endpoint informers, for it.
func (cc *clusterClientCache) addToClientMap(obj interface{}) {
cc.rwlock.Lock()
defer cc.rwlock.Unlock()
cluster, ok := obj.(*v1beta1.Cluster)
if !ok {
return
}
pred := getClusterConditionPredicate()
// check status
// skip if not ready
if pred(*cluster) {
cc.startClusterLW(cluster, cluster.Name)
}
}
func newClusterClientset(c *v1beta1.Cluster) (*kubeclientset.Clientset, error) {
clusterConfig, err := util.BuildClusterConfig(c)
if clusterConfig != nil {
clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
return clientset, nil
}
return nil, err
}
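
A minimal sketch of constructing an empty cache before any clusters are registered; the hypothetical constructor name is not part of this file, but the zero-value initialization matches what the service controller tests in this change do.

// newClusterClientCache is a hypothetical constructor for an empty cache.
// Ready clusters are added later via addToClientMap (which starts the
// per-cluster list/watch loops) and removed again via delFromClusterSet.
func newClusterClientCache() *clusterClientCache {
	return &clusterClientCache{
		rwlock:    sync.Mutex{},
		clientMap: make(map[string]*clusterCache),
	}
}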

View file

@ -0,0 +1,366 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"net"
"github.com/golang/glog"
"strings"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
"k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
)
const (
// minDnsTtl is the minimum safe DNS TTL value to use (in seconds). We use this as the TTL for all DNS records.
minDnsTtl = 180
)
// getHealthyEndpoints returns the hostnames and/or IP addresses of healthy endpoints for the service, at a zone, region and global level (or an error)
func (s *ServiceController) getHealthyEndpoints(clusterName string, cachedService *cachedService) (zoneEndpoints, regionEndpoints, globalEndpoints []string, err error) {
var (
zoneNames []string
regionName string
)
if zoneNames, regionName, err = s.getClusterZoneNames(clusterName); err != nil {
return nil, nil, nil, err
}
for lbClusterName, lbStatus := range cachedService.serviceStatusMap {
lbZoneNames, lbRegionName, err := s.getClusterZoneNames(lbClusterName)
if err != nil {
return nil, nil, nil, err
}
for _, ingress := range lbStatus.Ingress {
readyEndpoints, ok := cachedService.endpointMap[lbClusterName]
if !ok || readyEndpoints == 0 {
continue
}
var address string
// We should get either an IP address or a hostname - use whichever one we get
if ingress.IP != "" {
address = ingress.IP
} else if ingress.Hostname != "" {
address = ingress.Hostname
}
if len(address) <= 0 {
return nil, nil, nil, fmt.Errorf("Service %s/%s in cluster %s has neither LoadBalancerStatus.ingress.ip nor LoadBalancerStatus.ingress.hostname. Cannot use it as endpoint for federated service.",
cachedService.lastState.Name, cachedService.lastState.Namespace, clusterName)
}
for _, lbZoneName := range lbZoneNames {
for _, zoneName := range zoneNames {
if lbZoneName == zoneName {
zoneEndpoints = append(zoneEndpoints, address)
}
}
}
if lbRegionName == regionName {
regionEndpoints = append(regionEndpoints, address)
}
globalEndpoints = append(globalEndpoints, address)
}
}
return zoneEndpoints, regionEndpoints, globalEndpoints, nil
}
// getClusterZoneNames returns the name of the zones (and the region) where the specified cluster exists (e.g. zones "us-east1-c" on GCE, or "us-east-1b" on AWS)
func (s *ServiceController) getClusterZoneNames(clusterName string) (zones []string, region string, err error) {
client, ok := s.clusterCache.clientMap[clusterName]
if !ok {
return nil, "", fmt.Errorf("Cluster cache does not contain entry for cluster %s", clusterName)
}
if client.cluster == nil {
return nil, "", fmt.Errorf("Cluster cache entry for cluster %s is nil", clusterName)
}
return client.cluster.Status.Zones, client.cluster.Status.Region, nil
}
// getServiceDnsSuffix returns the DNS suffix to use when creating federated-service DNS records
func (s *ServiceController) getServiceDnsSuffix() (string, error) {
return s.serviceDnsSuffix, nil
}
// getDnsZones returns the DNS zones matching dnsZoneName and dnsZoneID (if specified)
func getDnsZones(dnsZoneName string, dnsZoneID string, dnsZonesInterface dnsprovider.Zones) ([]dnsprovider.Zone, error) {
// TODO: We need query-by-name and query-by-id functions
dnsZones, err := dnsZonesInterface.List()
if err != nil {
return nil, err
}
var matches []dnsprovider.Zone
findName := strings.TrimSuffix(dnsZoneName, ".")
for _, dnsZone := range dnsZones {
if dnsZoneID != "" {
if dnsZoneID != dnsZone.ID() {
continue
}
}
if findName != "" {
if strings.TrimSuffix(dnsZone.Name(), ".") != findName {
continue
}
}
matches = append(matches, dnsZone)
}
return matches, nil
}
// getDnsZone returns the DNS zone, as identified by dnsZoneName and dnsZoneID
// This is similar to getDnsZones, but returns an error if there are zero or multiple matching zones.
func getDnsZone(dnsZoneName string, dnsZoneID string, dnsZonesInterface dnsprovider.Zones) (dnsprovider.Zone, error) {
dnsZones, err := getDnsZones(dnsZoneName, dnsZoneID, dnsZonesInterface)
if err != nil {
return nil, err
}
if len(dnsZones) == 1 {
return dnsZones[0], nil
}
name := dnsZoneName
if dnsZoneID != "" {
name += "/" + dnsZoneID
}
if len(dnsZones) == 0 {
return nil, fmt.Errorf("DNS zone %s not found.", name)
} else {
return nil, fmt.Errorf("DNS zone %s is ambiguous (please specify zoneID).", name)
}
}
/* getRrset is a hack around the fact that dnsprovider.ResourceRecordSets interface does not yet include a Get() method, only a List() method. TODO: Fix that.
Note that if the named resource record set does not exist, but no error occurred, the returned set, and error, are both nil
*/
func getRrset(dnsName string, rrsetsInterface dnsprovider.ResourceRecordSets) (dnsprovider.ResourceRecordSet, error) {
var returnVal dnsprovider.ResourceRecordSet
rrsets, err := rrsetsInterface.List()
if err != nil {
return nil, err
}
for _, rrset := range rrsets {
if rrset.Name() == dnsName {
returnVal = rrset
break
}
}
return returnVal, nil
}
/* getResolvedEndpoints performs DNS resolution on the provided slice of endpoints (which might be DNS names or IPv4 addresses)
and returns a list of IPv4 addresses. If any of the endpoints are neither valid IPv4 addresses nor resolvable DNS names,
a non-nil error is also returned (possibly along with a partially complete list of resolved endpoints).
*/
func getResolvedEndpoints(endpoints []string) ([]string, error) {
resolvedEndpoints := make([]string, 0, len(endpoints))
for _, endpoint := range endpoints {
if net.ParseIP(endpoint) == nil {
// It's not a valid IP address, so assume it's a DNS name, and try to resolve it,
// replacing its DNS name with its IP addresses in resolvedEndpoints
ipAddrs, err := net.LookupHost(endpoint)
if err != nil {
return resolvedEndpoints, err
}
resolvedEndpoints = append(resolvedEndpoints, ipAddrs...)
} else {
resolvedEndpoints = append(resolvedEndpoints, endpoint)
}
}
return resolvedEndpoints, nil
}
/* ensureDnsRrsets ensures (idempotently, and with minimum mutations) that all of the DNS resource record sets for dnsName are consistent with endpoints.
if endpoints is nil or empty, a CNAME record to uplevelCname is ensured.
*/
func (s *ServiceController) ensureDnsRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {
rrsets, supported := dnsZone.ResourceRecordSets()
if !supported {
return fmt.Errorf("Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface.", dnsName)
}
rrset, err := getRrset(dnsName, rrsets) // TODO: rrsets.Get(dnsName)
if err != nil {
return err
}
if rrset == nil {
glog.V(4).Infof("No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q", dnsName, uplevelCname)
if len(endpoints) < 1 {
glog.V(4).Infof("There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided", dnsName, uplevelCname)
if uplevelCname != "" {
glog.V(4).Infof("Creating CNAME to %q for %q", uplevelCname, dnsName)
newRrset := rrsets.New(dnsName, []string{uplevelCname}, minDnsTtl, rrstype.CNAME)
glog.V(4).Infof("Adding recordset %v", newRrset)
err = rrsets.StartChangeset().Add(newRrset).Apply()
if err != nil {
return err
}
glog.V(4).Infof("Successfully created CNAME to %q for %q", uplevelCname, dnsName)
} else {
glog.V(4).Infof("We want no record for %q, and we have no record, so we're all good.", dnsName)
}
} else {
// We have valid endpoint addresses, so just add them as A records.
// But first resolve DNS names, as some cloud providers (like AWS) expose
// load balancers behind DNS names, not IP addresses.
glog.V(4).Infof("We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names", endpoints, dnsName)
resolvedEndpoints, err := getResolvedEndpoints(endpoints)
if err != nil {
return err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.
}
newRrset := rrsets.New(dnsName, resolvedEndpoints, minDnsTtl, rrstype.A)
glog.V(4).Infof("Adding recordset %v", newRrset)
err = rrsets.StartChangeset().Add(newRrset).Apply()
if err != nil {
return err
}
glog.V(4).Infof("Successfully added recordset %v", newRrset)
}
} else {
// the rrset already exists, so make it right.
glog.V(4).Infof("Recordset %v already exists. Ensuring that it is correct.", rrset)
if len(endpoints) < 1 {
// Need an appropriate CNAME record. Check that we have it.
newRrset := rrsets.New(dnsName, []string{uplevelCname}, minDnsTtl, rrstype.CNAME)
glog.V(4).Infof("No healthy endpoints for %s. Have recordset %v. Need recordset %v", dnsName, rrset, newRrset)
if dnsprovider.ResourceRecordSetsEquivalent(rrset, newRrset) {
// The existing rrset is equivalent to the required one - our work is done here
glog.V(4).Infof("Existing recordset %v is equivalent to needed recordset %v, our work is done here.", rrset, newRrset)
return nil
} else {
// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).
glog.V(4).Infof("Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.", rrset, newRrset)
changeSet := rrsets.StartChangeset()
changeSet.Remove(rrset)
if uplevelCname != "" {
changeSet.Add(newRrset)
if err := changeSet.Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully replaced needed recordset %v -> %v", rrset, newRrset)
} else {
if err := changeSet.Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully removed existing recordset %v", rrset)
glog.V(4).Infof("Uplevel CNAME is empty string. Not adding recordset %v", newRrset)
}
}
} else {
// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.
// And we have healthy endpoints. Just replace what's there with the healthy endpoints, if it's not already correct.
glog.V(4).Infof("%s: Healthy endpoints %v exist. Recordset %v exists. Reconciling.", dnsName, endpoints, rrset)
resolvedEndpoints, err := getResolvedEndpoints(endpoints)
if err != nil { // Some invalid addresses or otherwise unresolvable DNS names.
return err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.
}
newRrset := rrsets.New(dnsName, resolvedEndpoints, minDnsTtl, rrstype.A)
glog.V(4).Infof("Have recordset %v. Need recordset %v", rrset, newRrset)
if dnsprovider.ResourceRecordSetsEquivalent(rrset, newRrset) {
glog.V(4).Infof("Existing recordset %v is equivalent to needed recordset %v, our work is done here.", rrset, newRrset)
// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the
// worst case we'll just replace what's there with an equivalent, if not exactly identical record set.
return nil
} else {
// Need to replace the existing one with a better one
glog.V(4).Infof("Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.", rrset, newRrset)
if err = rrsets.StartChangeset().Remove(rrset).Add(newRrset).Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully replaced recordset %v -> %v", rrset, newRrset)
}
}
}
return nil
}
/* ensureDnsRecords ensures (idempotently, and with minimum mutations) that all of the DNS records for a service in a given cluster are correct,
given the current state of that service in that cluster. This should be called every time the state of a service might have changed
(either w.r.t. its loadbalancer address, or because the number of healthy backend endpoints for that service transitioned from zero to non-zero
or vice versa). Only shards of the service which have both a loadbalancer ingress IP address or hostname AND at least one healthy backend endpoint
are included in DNS records for that service (at all of zone, region and global levels). All other addresses are removed. Also, if no shards exist
in the zone or region of the cluster, a CNAME reference to the next higher level is ensured to exist. */
func (s *ServiceController) ensureDnsRecords(clusterName string, cachedService *cachedService) error {
// Quinton: Pseudocode....
// See https://github.com/kubernetes/kubernetes/pull/25107#issuecomment-218026648
// For each service we need the following DNS names:
// mysvc.myns.myfed.svc.z1.r1.mydomain.com (for zone z1 in region r1)
// - an A record to IP address of specific shard in that zone (if that shard exists and has healthy endpoints)
// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.r1.mydomain.com (if a healthy shard does not exist in zone z1)
// mysvc.myns.myfed.svc.r1.mydomain.com (for region r1)
// - a set of A records to IP addresses of all healthy shards in region r1, if one or more of these exist
// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.mydomain.com (if no healthy shards exist in region r1)
// mysvc.myns.myfed.svc.mydomain.com (global level)
// - a set of A records to IP addresses of all healthy shards in all regions, if one or more of these exist.
// - OR no record (NXDOMAIN response) if no healthy shards exist in any region
//
// For each cached service, cachedService.lastState tracks the current known state of the service, while cachedService.appliedState contains
// the state of the service when we last successfully sync'd its DNS records.
// So this time around we only need to patch that (add new records, remove deleted records, and update changed records).
//
if s == nil {
return fmt.Errorf("nil ServiceController passed to ServiceController.ensureDnsRecords(clusterName: %s, cachedService: %v)", clusterName, cachedService)
}
if s.dns == nil {
return nil
}
if cachedService == nil {
return fmt.Errorf("nil cachedService passed to ServiceController.ensureDnsRecords(clusterName: %s, cachedService: %v)", clusterName, cachedService)
}
serviceName := cachedService.lastState.Name
namespaceName := cachedService.lastState.Namespace
zoneNames, regionName, err := s.getClusterZoneNames(clusterName)
if err != nil {
return err
}
if zoneNames == nil {
return fmt.Errorf("failed to get cluster zone names")
}
serviceDnsSuffix, err := s.getServiceDnsSuffix()
if err != nil {
return err
}
zoneEndpoints, regionEndpoints, globalEndpoints, err := s.getHealthyEndpoints(clusterName, cachedService)
if err != nil {
return err
}
commonPrefix := serviceName + "." + namespaceName + "." + s.federationName + ".svc"
// dnsNames is the path up the DNS search tree, starting at the leaf
dnsNames := []string{
commonPrefix + "." + zoneNames[0] + "." + regionName + "." + serviceDnsSuffix, // zone level - TODO might need other zone names for multi-zone clusters
commonPrefix + "." + regionName + "." + serviceDnsSuffix, // region level, one up from zone level
commonPrefix + "." + serviceDnsSuffix, // global level, one up from region level
"", // nowhere to go up from global level
}
endpoints := [][]string{zoneEndpoints, regionEndpoints, globalEndpoints}
dnsZone, err := getDnsZone(s.zoneName, s.zoneID, s.dnsZones)
if err != nil {
return err
}
for i, endpoint := range endpoints {
if err = s.ensureDnsRrsets(dnsZone, dnsNames[i], endpoint, dnsNames[i+1]); err != nil {
return err
}
}
return nil
}
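
To make the name chain concrete, here is a sketch with purely hypothetical inputs: service "mysvc" in namespace "myns", federation "myfed", cluster zone "us-central1-a" in region "us-central1", and a serviceDnsSuffix of "federation.example.com". The dnsNames slice built in ensureDnsRecords would then contain, leaf to root:

// dnsNamesExample shows the lookup path ensureDnsRecords would build for the
// hypothetical inputs described above.
func dnsNamesExample() []string {
	commonPrefix := "mysvc" + "." + "myns" + "." + "myfed" + ".svc"
	return []string{
		commonPrefix + ".us-central1-a.us-central1.federation.example.com", // zone level
		commonPrefix + ".us-central1.federation.example.com",               // region level
		commonPrefix + ".federation.example.com",                           // global level
		"", // nothing to CNAME to above the global level
	}
}

Each level gets A records when healthy endpoints exist at that level, and otherwise a CNAME to the next entry in the slice.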

View file

@ -0,0 +1,171 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
"testing"
"fmt"
"reflect"
"sort"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestServiceController_ensureDnsRecords(t *testing.T) {
tests := []struct {
name string
service v1.Service
expected []string
serviceStatus v1.LoadBalancerStatus
}{
{
name: "withip",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
serviceStatus: buildServiceStatus([][]string{{"198.51.100.1", ""}}),
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:A:180:[198.51.100.1]",
},
},
/*
TODO: getResolvedEndpoints performs DNS lookup.
Mock and maybe look at error handling when some endpoints resolve, but also caching?
{
name: "withname",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
serviceStatus: buildServiceStatus([][]string{{"", "randomstring.amazonelb.example.com"}}),
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:A:180:[198.51.100.1]",
},
},
*/
{
name: "noendpoints",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:CNAME:180:[servicename.servicenamespace.myfederation.svc.federation.example.com]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:CNAME:180:[servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com]",
},
},
}
for _, test := range tests {
fakedns, _ := clouddns.NewFakeInterface()
fakednsZones, ok := fakedns.Zones()
if !ok {
t.Error("Unable to fetch zones")
}
serviceController := ServiceController{
dns: fakedns,
dnsZones: fakednsZones,
serviceDnsSuffix: "federation.example.com",
zoneName: "example.com",
federationName: "myfederation",
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
rwlock: sync.Mutex{},
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
clusterName := "testcluster"
serviceController.clusterCache.clientMap[clusterName] = &clusterCache{
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
}
cachedService := &cachedService{
lastState: &test.service,
endpointMap: make(map[string]int),
serviceStatusMap: make(map[string]v1.LoadBalancerStatus),
}
cachedService.endpointMap[clusterName] = 1
if !reflect.DeepEqual(&test.serviceStatus, &v1.LoadBalancerStatus{}) {
cachedService.serviceStatusMap[clusterName] = test.serviceStatus
}
err := serviceController.ensureDnsRecords(clusterName, cachedService)
if err != nil {
t.Errorf("Test failed for %s, unexpected error %v", test.name, err)
}
zones, err := fakednsZones.List()
if err != nil {
t.Errorf("error querying zones: %v", err)
}
// Dump every record to a testable-by-string-comparison form
var records []string
for _, z := range zones {
zoneName := z.Name()
rrs, ok := z.ResourceRecordSets()
if !ok {
t.Errorf("cannot get rrs for zone %q", zoneName)
}
rrList, err := rrs.List()
if err != nil {
t.Errorf("error querying rr for zone %q: %v", zoneName, err)
}
for _, rr := range rrList {
rrdatas := rr.Rrdatas()
// Put in consistent (testable-by-string-comparison) order
sort.Strings(rrdatas)
records = append(records, fmt.Sprintf("%s:%s:%s:%d:%s", zoneName, rr.Name(), rr.Type(), rr.Ttl(), rrdatas))
}
}
// Ignore order of records
sort.Strings(records)
sort.Strings(test.expected)
if !reflect.DeepEqual(records, test.expected) {
t.Errorf("Test %q failed. Actual=%v, Expected=%v", test.name, records, test.expected)
}
}
}

View file

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package service contains code for syncing Kubernetes services,
// and cloud DNS servers with the federated service registry.
package service // import "k8s.io/kubernetes/federation/pkg/federation-controller/service"

View file

@ -0,0 +1,206 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"time"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
)
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (sc *ServiceController) clusterEndpointWorker() {
// process all pending events in endpointWorkerDoneChan
ForLoop:
for {
select {
case clusterName := <-sc.endpointWorkerDoneChan:
sc.endpointWorkerMap[clusterName] = false
default:
// non-blocking, comes here if all existing events are processed
break ForLoop
}
}
for clusterName, cache := range sc.clusterCache.clientMap {
workerExist, found := sc.endpointWorkerMap[clusterName]
if found && workerExist {
continue
}
// create a worker only if the previous worker has finished and gone out of scope
go func(cache *clusterCache, clusterName string) {
fedClient := sc.federationClient
for {
func() {
key, quit := cache.endpointQueue.Get()
// update endpoint cache
if quit {
// send signal that current worker has finished tasks and is going out of scope
sc.endpointWorkerDoneChan <- clusterName
return
}
defer cache.endpointQueue.Done(key)
err := sc.clusterCache.syncEndpoint(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
if err != nil {
glog.V(2).Infof("Failed to sync endpoint: %+v", err)
}
}()
}
}(cache, clusterName)
sc.endpointWorkerMap[clusterName] = true
}
}
// Whenever there is change on endpoint, the federation service should be updated
// key is the namespaced name of endpoint
func (cc *clusterClientCache) syncEndpoint(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient fedclientset.Interface, serviceController *ServiceController) error {
cachedService, ok := serviceCache.get(key)
if !ok {
// filter out all non-federation services here
return nil
}
endpointInterface, exists, err := clusterCache.endpointStore.GetByKey(key)
if err != nil {
glog.Errorf("Did not successfully get %v from store: %v, will retry later", key, err)
clusterCache.endpointQueue.Add(key)
return err
}
if exists {
endpoint, ok := endpointInterface.(*v1.Endpoints)
if ok {
glog.V(4).Infof("Found endpoint for federation service %s/%s from cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
err = cc.processEndpointUpdate(cachedService, endpoint, clusterName, serviceController)
} else {
_, ok := endpointInterface.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", endpointInterface)
}
glog.Infof("Found tombstone for %v", key)
err = cc.processEndpointDeletion(cachedService, clusterName, serviceController)
}
} else {
// endpoint absence in store means the watcher caught the deletion, ensure LB info is cleaned
glog.Infof("Cannot get endpoint %v for cluster %s from endpointStore", key, clusterName)
err = cc.processEndpointDeletion(cachedService, clusterName, serviceController)
}
if err != nil {
glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
clusterCache.endpointQueue.Add(key)
}
cachedService.resetDNSUpdateDelay()
return nil
}
func (cc *clusterClientCache) processEndpointDeletion(cachedService *cachedService, clusterName string, serviceController *ServiceController) error {
glog.V(4).Infof("Processing endpoint deletion for %s/%s, cluster %s", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
var err error
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
_, ok := cachedService.endpointMap[clusterName]
// TODO: remove the ok check? If the service controller is restarted, endpointMap for the cluster does not exist;
// we would need to query dns info from the dnsprovider and determine whether deletion is needed.
if ok {
// endpoints lost, clean dns record
glog.V(4).Infof("Cached endpoint was found for %s/%s, cluster %s, removing", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
delete(cachedService.endpointMap, clusterName)
for i := 0; i < clientRetryCount; i++ {
err = serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
}
return err
}
// Updates dns info when an endpoint update event is received.
// We do not care about the endpoint details; all that matters here is whether len(endpoints.Subsets) > 0.
func (cc *clusterClientCache) processEndpointUpdate(cachedService *cachedService, endpoint *v1.Endpoints, clusterName string, serviceController *ServiceController) error {
glog.V(4).Infof("Processing endpoint update for %s/%s, cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
var err error
cachedService.rwlock.Lock()
var reachable bool
defer cachedService.rwlock.Unlock()
_, ok := cachedService.endpointMap[clusterName]
if !ok {
for _, subset := range endpoint.Subsets {
if len(subset.Addresses) > 0 {
reachable = true
break
}
}
if reachable {
// endpoints seen for the first time, update dns record
glog.V(4).Infof("Reachable endpoint was found for %s/%s, cluster %s, building endpointMap", endpoint.Namespace, endpoint.Name, clusterName)
cachedService.endpointMap[clusterName] = 1
for i := 0; i < clientRetryCount; i++ {
err = serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
return err
}
} else {
for _, subset := range endpoint.Subsets {
if len(subset.Addresses) > 0 {
reachable = true
break
}
}
if !reachable {
// previously reachable endpoints were lost, update dns record
glog.V(4).Infof("Reachable endpoint was lost for %s/%s, cluster %s, deleting endpointMap", endpoint.Namespace, endpoint.Name, clusterName)
delete(cachedService.endpointMap, clusterName)
for i := 0; i < clientRetryCount; i++ {
err = serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
return err
}
}
return nil
}
// obj could be a *v1.Endpoints, or a cache.DeletedFinalStateUnknown marker item.
func (cc *clusterClientCache) enqueueEndpoint(obj interface{}, clusterName string) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
_, ok := cc.clientMap[clusterName]
if ok {
cc.clientMap[clusterName].endpointQueue.Add(key)
}
}
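
A minimal sketch of the reachability check described above, assuming only the vendored v1.Endpoints type; endpointsReachable is an illustrative helper, not a function from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

// endpointsReachable reports whether any subset carries at least one address,
// which is the only property processEndpointUpdate cares about.
func endpointsReachable(ep *v1.Endpoints) bool {
	for _, subset := range ep.Subsets {
		if len(subset.Addresses) > 0 {
			return true
		}
	}
	return false
}

func main() {
	ep := &v1.Endpoints{Subsets: []v1.EndpointSubset{
		{Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}}},
	}}
	fmt.Println(endpointsReachable(ep)) // prints true
}
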

View file

@ -0,0 +1,161 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"testing"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
var fakeDns, _ = clouddns.NewFakeInterface() // No need to check for unsupported interfaces, as the fake interface supports everything that's required.
var fakeDnsZones, _ = fakeDns.Zones()
var fakeServiceController = ServiceController{
dns: fakeDns,
dnsZones: fakeDnsZones,
federationName: "fed1",
zoneName: "example.com",
serviceDnsSuffix: "federation.example.com",
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
func buildEndpoint(subsets [][]string) *v1.Endpoints {
endpoint := &v1.Endpoints{
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{}},
},
}
for _, element := range subsets {
address := v1.EndpointAddress{IP: element[0], Hostname: element[1], TargetRef: nil}
endpoint.Subsets[0].Addresses = append(endpoint.Subsets[0].Addresses, address)
}
return endpoint
}
func TestProcessEndpointUpdate(t *testing.T) {
clusterName := "foo"
cc := clusterClientCache{
clientMap: map[string]*clusterCache{
clusterName: {
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
},
},
}
tests := []struct {
name string
cachedService *cachedService
endpoint *v1.Endpoints
clusterName string
expectResult int
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: make(map[string]int),
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
1,
},
{
"has-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: map[string]int{
"foo": 1,
},
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
1,
},
}
fakeServiceController.clusterCache = &cc
for _, test := range tests {
cc.processEndpointUpdate(test.cachedService, test.endpoint, test.clusterName, &fakeServiceController)
if test.expectResult != test.cachedService.endpointMap[test.clusterName] {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName])
}
}
}
func TestProcessEndpointDeletion(t *testing.T) {
clusterName := "foo"
cc := clusterClientCache{
clientMap: map[string]*clusterCache{
clusterName: {
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
},
},
}
tests := []struct {
name string
cachedService *cachedService
endpoint *v1.Endpoints
clusterName string
expectResult int
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: make(map[string]int),
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
0,
},
{
"has-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: map[string]int{
clusterName: 1,
},
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
0,
},
}
fakeServiceController.clusterCache = &cc
for _, test := range tests {
cc.processEndpointDeletion(test.cachedService, test.clusterName, &fakeServiceController)
if test.expectResult != test.cachedService.endpointMap[test.clusterName] {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName])
}
}
}

View file

@ -0,0 +1,303 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"time"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api/errors"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"reflect"
"sort"
"github.com/golang/glog"
)
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (sc *ServiceController) clusterServiceWorker() {
// process all pending events in serviceWorkerDoneChan
ForLoop:
for {
select {
case clusterName := <-sc.serviceWorkerDoneChan:
sc.serviceWorkerMap[clusterName] = false
default:
// non-blocking, comes here if all existing events are processed
break ForLoop
}
}
for clusterName, cache := range sc.clusterCache.clientMap {
workerExist, found := sc.serviceWorkerMap[clusterName]
if found && workerExist {
continue
}
// create a worker only if the previous worker has finished and gone out of scope
go func(cache *clusterCache, clusterName string) {
fedClient := sc.federationClient
for {
func() {
key, quit := cache.serviceQueue.Get()
if quit {
// send signal that current worker has finished tasks and is going out of scope
sc.serviceWorkerDoneChan <- clusterName
return
}
defer cache.serviceQueue.Done(key)
err := sc.clusterCache.syncService(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
if err != nil {
glog.Errorf("Failed to sync service: %+v", err)
}
}()
}
}(cache, clusterName)
sc.serviceWorkerMap[clusterName] = true
}
}
// Whenever there is change on service, the federation service should be updated
func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient fedclientset.Interface, sc *ServiceController) error {
// obj holds the latest service info from apiserver, return if there is no federation cache for the service
cachedService, ok := serviceCache.get(key)
if !ok {
// if the serviceCache entry does not exist, the service was not created by federation, so skip it
return nil
}
serviceInterface, exists, err := clusterCache.serviceStore.Indexer.GetByKey(key)
if err != nil {
glog.Errorf("Did not successfully get %v from store: %v, will retry later", key, err)
clusterCache.serviceQueue.Add(key)
return err
}
var needUpdate, isDeletion bool
if exists {
service, ok := serviceInterface.(*v1.Service)
if ok {
glog.V(4).Infof("Found service for federation service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
needUpdate = cc.processServiceUpdate(cachedService, service, clusterName)
} else {
_, ok := serviceInterface.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", serviceInterface)
}
glog.Infof("Found tombstone for %v", key)
needUpdate = cc.processServiceDeletion(cachedService, clusterName)
isDeletion = true
}
} else {
glog.Infof("Can not get service %v for cluster %s from serviceStore", key, clusterName)
needUpdate = cc.processServiceDeletion(cachedService, clusterName)
isDeletion = true
}
if needUpdate {
for i := 0; i < clientRetryCount; i++ {
err := sc.ensureDnsRecords(clusterName, cachedService)
if err == nil {
break
}
glog.V(4).Infof("Error ensuring DNS Records for service %s on cluster %s: %v", key, clusterName, err)
time.Sleep(cachedService.nextDNSUpdateDelay())
clusterCache.serviceQueue.Add(key)
// do not retry here as we still want to persist to the federation apiserver even if ensuring dns records fails
}
err := cc.persistFedServiceUpdate(cachedService, fedClient)
if err == nil {
cachedService.appliedState = cachedService.lastState
cachedService.resetFedUpdateDelay()
} else {
glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
clusterCache.serviceQueue.Add(key)
}
}
if isDeletion {
// cachedService is not reliable here as
// deleting cache is the last step of federation service deletion
_, err := fedClient.Core().Services(cachedService.lastState.Namespace).Get(cachedService.lastState.Name)
// rebuild service if federation service still exists
if err == nil || !errors.IsNotFound(err) {
return sc.ensureClusterService(cachedService, clusterName, cachedService.appliedState, clusterCache.clientset)
}
}
return nil
}
// processServiceDeletion is triggered when a service is deleted from an underlying k8s cluster.
// It wipes out the cached ingress info of the service from the federation service ingress.
// The function returns a bool indicating whether the federation service cache was actually updated;
// if so, the updated info should be posted to the federation apiserver.
func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedService, clusterName string) bool {
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
// cached status found, remove ingress info from federation service cache
if ok {
cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
removeIndexes := []int{}
for i, fed := range cachedFedServiceStatus.Ingress {
for _, new := range cachedStatus.Ingress {
// remove if same ingress record found
if new.IP == fed.IP && new.Hostname == fed.Hostname {
removeIndexes = append(removeIndexes, i)
}
}
}
sort.Ints(removeIndexes)
for i := len(removeIndexes) - 1; i >= 0; i-- {
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
glog.V(4).Infof("Remove old ingress %d for service %s/%s", removeIndexes[i], cachedService.lastState.Namespace, cachedService.lastState.Name)
}
delete(cachedService.serviceStatusMap, clusterName)
delete(cachedService.endpointMap, clusterName)
cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
return true
} else {
glog.V(4).Infof("Service removal %s/%s from cluster %s observed.", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
}
return false
}
// processServiceUpdate updates ingress info when a service update is received.
// The function returns a bool indicating whether the federation service cache was actually updated;
// if so, the updated info should be posted to the federation apiserver.
func (cc *clusterClientCache) processServiceUpdate(cachedService *cachedService, service *v1.Service, clusterName string) bool {
glog.V(4).Infof("Processing service update for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
var needUpdate bool
newServiceLB := service.Status.LoadBalancer
cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
if len(newServiceLB.Ingress) == 0 {
// LB IP not assigned yet
return false
}
cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
if ok {
if reflect.DeepEqual(cachedStatus, newServiceLB) {
glog.V(4).Infof("Same ingress info observed for service %s/%s: %+v ", service.Namespace, service.Name, cachedStatus.Ingress)
} else {
glog.V(4).Infof("Ingress info was changed for service %s/%s: cache: %+v, new: %+v ",
service.Namespace, service.Name, cachedStatus.Ingress, newServiceLB)
needUpdate = true
}
} else {
glog.V(4).Infof("Cached service status was not found for %s/%s, cluster %s, building one", service.Namespace, service.Name, clusterName)
// the cache is not always reliable (it is cleared when the service controller restarts)
// two cases run into this branch:
// 1. new service loadbalancer info received -> no info in the cache, and none in the federation service
// 2. service controller restarted -> no info in the cache, but it is in the federation service
// check if the lb info is already in the federation service
cachedService.serviceStatusMap[clusterName] = newServiceLB
needUpdate = false
// iterate service ingress info
for _, new := range newServiceLB.Ingress {
var found bool
// if it is known by federation service
for _, fed := range cachedFedServiceStatus.Ingress {
if new.IP == fed.IP && new.Hostname == fed.Hostname {
found = true
break
}
}
if !found {
needUpdate = true
break
}
}
}
if needUpdate {
// new status = cached federation status - cached status + new status from k8s cluster
removeIndexes := []int{}
for i, fed := range cachedFedServiceStatus.Ingress {
for _, new := range cachedStatus.Ingress {
// remove if same ingress record found
if new.IP == fed.IP && new.Hostname == fed.Hostname {
removeIndexes = append(removeIndexes, i)
}
}
}
sort.Ints(removeIndexes)
for i := len(removeIndexes) - 1; i >= 0; i-- {
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
}
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress, service.Status.LoadBalancer.Ingress...)
cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
glog.V(4).Infof("Add new ingress info %+v for service %s/%s", service.Status.LoadBalancer, service.Namespace, service.Name)
} else {
glog.V(4).Infof("Same ingress info found for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
}
return needUpdate
}
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient fedclientset.Interface) error {
service := cachedService.lastState
glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
var err error
for i := 0; i < clientRetryCount; i++ {
_, err = fedClient.Core().Services(service.Namespace).Get(service.Name)
if errors.IsNotFound(err) {
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
service.Namespace, service.Name, err)
return nil
}
_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
if err == nil {
glog.V(2).Infof("Successfully update service %s/%s to federation apiserver", service.Namespace, service.Name)
return nil
}
if errors.IsNotFound(err) {
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
service.Namespace, service.Name, err)
return nil
}
if errors.IsConflict(err) {
glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
service.Namespace, service.Name, err)
return err
}
time.Sleep(cachedService.nextFedUpdateDelay())
}
return err
}
// obj could be a *v1.Service, or a cache.DeletedFinalStateUnknown marker item.
func (cc *clusterClientCache) enqueueService(obj interface{}, clusterName string) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
_, ok := cc.clientMap[clusterName]
if ok {
cc.clientMap[clusterName].serviceQueue.Add(key)
}
}
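
A standalone sketch of the ingress-merge rule used in processServiceUpdate and processServiceDeletion (federation status minus the cluster's previously cached ingresses, plus the cluster's new ingresses); mergeIngress is an illustrative helper, not part of this commit.

package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

// mergeIngress drops every federation ingress that matches one of the
// cluster's old ingresses (same IP and hostname) and appends the cluster's
// new ingresses.
func mergeIngress(fed, oldCluster, newCluster []v1.LoadBalancerIngress) []v1.LoadBalancerIngress {
	merged := []v1.LoadBalancerIngress{}
	for _, f := range fed {
		matched := false
		for _, o := range oldCluster {
			if o.IP == f.IP && o.Hostname == f.Hostname {
				matched = true
				break
			}
		}
		if !matched {
			merged = append(merged, f)
		}
	}
	return append(merged, newCluster...)
}

func main() {
	fed := []v1.LoadBalancerIngress{{IP: "ip1"}, {IP: "ip2"}}
	old := []v1.LoadBalancerIngress{{IP: "ip1"}}
	updated := []v1.LoadBalancerIngress{{IP: "ip3"}}
	fmt.Println(mergeIngress(fed, old, updated)) // keeps ip2, adds ip3
}
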

View file

@ -0,0 +1,162 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
)
func buildServiceStatus(ingresses [][]string) v1.LoadBalancerStatus {
status := v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{},
}
for _, element := range ingresses {
ingress := v1.LoadBalancerIngress{IP: element[0], Hostname: element[1]}
status.Ingress = append(status.Ingress, ingress)
}
return status
}
func TestProcessServiceUpdate(t *testing.T) {
cc := clusterClientCache{
clientMap: make(map[string]*clusterCache),
}
tests := []struct {
name string
cachedService *cachedService
service *v1.Service
clusterName string
expectNeedUpdate bool
expectStatus v1.LoadBalancerStatus
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
serviceStatusMap: make(map[string]v1.LoadBalancerStatus),
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo",
true,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"same-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
false,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"diff-cluster",
&cachedService{
lastState: &v1.Service{
ObjectMeta: v1.ObjectMeta{Name: "bar1"},
},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo2": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"diff-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}}),
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}}),
},
}
for _, test := range tests {
result := cc.processServiceUpdate(test.cachedService, test.service, test.clusterName)
if test.expectNeedUpdate != result {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result)
}
if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer)
}
}
}
func TestProcessServiceDeletion(t *testing.T) {
cc := clusterClientCache{
clientMap: make(map[string]*clusterCache),
}
tests := []struct {
name string
cachedService *cachedService
service *v1.Service
clusterName string
expectNeedUpdate bool
expectStatus v1.LoadBalancerStatus
}{
{
"same-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{}),
},
{
"diff-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}, {"ip3", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}}),
"foo2": buildServiceStatus([][]string{{"ip5", ""}, {"ip6", ""}, {"ip8", ""}}),
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip4", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}}),
},
}
for _, test := range tests {
result := cc.processServiceDeletion(test.cachedService, test.clusterName)
if test.expectNeedUpdate != result {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result)
}
if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) {
t.Errorf("Test failed for %s, expected %+v, saw %+v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer)
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,85 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
"testing"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestGetClusterConditionPredicate(t *testing.T) {
fakedns, _ := clouddns.NewFakeInterface() // No need to check for unsupported interfaces, as the fake interface supports everything that's required.
serviceController := ServiceController{
dns: fakedns,
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
rwlock: sync.Mutex{},
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
tests := []struct {
cluster v1beta1.Cluster
expectAccept bool
name string
serviceController *ServiceController
}{
{
cluster: v1beta1.Cluster{},
expectAccept: false,
name: "empty",
serviceController: &serviceController,
},
{
cluster: v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Conditions: []v1beta1.ClusterCondition{
{Type: v1beta1.ClusterReady, Status: v1.ConditionTrue},
},
},
},
expectAccept: true,
name: "basic",
serviceController: &serviceController,
},
{
cluster: v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Conditions: []v1beta1.ClusterCondition{
{Type: v1beta1.ClusterReady, Status: v1.ConditionFalse},
},
},
},
expectAccept: false,
name: "notready",
serviceController: &serviceController,
},
}
pred := getClusterConditionPredicate()
for _, test := range tests {
accept := pred(test.cluster)
if accept != test.expectAccept {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectAccept, accept)
}
}
}

View file

@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"backoff.go",
"cluster_util.go",
"configmap.go",
"delaying_deliverer.go",
"deployment.go",
"federated_informer.go",
"federated_updater.go",
"handlers.go",
"meta.go",
"secret.go",
"versionize_listoptions.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"delaying_deliverer_test.go",
"deployment_test.go",
"federated_informer_test.go",
"federated_updater_test.go",
"handlers_test.go",
"meta_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

View file

@ -0,0 +1,36 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"time"
"k8s.io/kubernetes/pkg/util/flowcontrol"
)
func StartBackoffGC(backoff *flowcontrol.Backoff, stopCh <-chan struct{}) {
go func() {
for {
select {
case <-time.After(time.Minute):
backoff.GC()
case <-stopCh:
return
}
}
}()
}
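
A usage sketch for StartBackoffGC, assuming the vendored flowcontrol package above; the key "cluster1" and the durations are illustrative.

package main

import (
	"time"

	"k8s.io/kubernetes/federation/pkg/federation-controller/util"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Back off from 5s up to 1m per failing key; stale entries are garbage
	// collected once a minute until stopCh is closed.
	backoff := flowcontrol.NewBackOff(5*time.Second, time.Minute)
	stopCh := make(chan struct{})
	util.StartBackoffGC(backoff, stopCh)

	backoff.Next("cluster1", time.Now()) // record a failure for cluster1
	_ = backoff.Get("cluster1")          // current delay before the next retry
	close(stopCh)
}
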

View file

@ -0,0 +1,147 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"os"
"time"
"github.com/golang/glog"
federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
KubeAPIQPS = 20.0
KubeAPIBurst = 30
KubeconfigSecretDataKey = "kubeconfig"
getSecretTimeout = 1 * time.Minute
)
func BuildClusterConfig(c *federation_v1beta1.Cluster) (*restclient.Config, error) {
var serverAddress string
var clusterConfig *restclient.Config
hostIP, err := utilnet.ChooseHostInterface()
if err != nil {
return nil, err
}
for _, item := range c.Spec.ServerAddressByClientCIDRs {
_, cidrnet, err := net.ParseCIDR(item.ClientCIDR)
if err != nil {
return nil, err
}
myaddr := net.ParseIP(hostIP.String())
if cidrnet.Contains(myaddr) {
serverAddress = item.ServerAddress
break
}
}
if serverAddress != "" {
if c.Spec.SecretRef == nil {
glog.Infof("didn't find secretRef for cluster %s. Trying insecure access", c.Name)
clusterConfig, err = clientcmd.BuildConfigFromFlags(serverAddress, "")
} else {
kubeconfigGetter := KubeconfigGetterForCluster(c)
clusterConfig, err = clientcmd.BuildConfigFromKubeconfigGetter(serverAddress, kubeconfigGetter)
}
if err != nil {
return nil, err
}
clusterConfig.QPS = KubeAPIQPS
clusterConfig.Burst = KubeAPIBurst
}
return clusterConfig, nil
}
// This is a variable so that a different kubeconfigGetter can be injected in tests.
// In tests we don't use the standard one, which calls NewInCluster, to avoid having to set up service accounts and mount files with secret tokens.
var KubeconfigGetterForCluster = func(c *federation_v1beta1.Cluster) clientcmd.KubeconfigGetter {
return func() (*clientcmdapi.Config, error) {
secretRefName := ""
if c.Spec.SecretRef != nil {
secretRefName = c.Spec.SecretRef.Name
} else {
glog.Infof("didn't find secretRef for cluster %s. Trying insecure access", c.Name)
}
return KubeconfigGetterForSecret(secretRefName)()
}
}
// KubeconfigGetterForSecret is used to get the kubeconfig from the given secret.
var KubeconfigGetterForSecret = func(secretName string) clientcmd.KubeconfigGetter {
return func() (*clientcmdapi.Config, error) {
var data []byte
if secretName != "" {
// Get the namespace this is running in from the env variable.
namespace := os.Getenv("POD_NAMESPACE")
if namespace == "" {
return nil, fmt.Errorf("unexpected: POD_NAMESPACE env var returned empty string")
}
// Get a client to talk to the k8s apiserver, to fetch secrets from it.
cc, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
}
client, err := clientset.NewForConfig(cc)
if err != nil {
return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
}
data = []byte{}
var secret *api.Secret
err = wait.PollImmediate(1*time.Second, getSecretTimeout, func() (bool, error) {
secret, err = client.Core().Secrets(namespace).Get(secretName)
if err == nil {
return true, nil
}
glog.Warningf("error in fetching secret: %s", err)
return false, nil
})
if err != nil {
return nil, fmt.Errorf("timed out waiting for secret: %s", err)
}
if secret == nil {
return nil, fmt.Errorf("unexpected: received null secret %s", secretName)
}
ok := false
data, ok = secret.Data[KubeconfigSecretDataKey]
if !ok {
return nil, fmt.Errorf("secret does not have data with key: %s", KubeconfigSecretDataKey)
}
}
return clientcmd.Load(data)
}
}
// Returns a Clientset for the given cluster.
func GetClientsetForCluster(cluster *federation_v1beta1.Cluster) (*fedclientset.Clientset, error) {
clusterConfig, err := BuildClusterConfig(cluster)
if err == nil && clusterConfig != nil {
clientset := fedclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName))
return clientset, nil
}
return nil, err
}
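
A sketch of the test-injection pattern the comment on KubeconfigGetterForCluster describes: the package-level getter is swapped for one that returns a canned, empty kubeconfig. The override shown here is illustrative only and assumes the util package path above.

package main

import (
	federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
	"k8s.io/kubernetes/federation/pkg/federation-controller/util"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)

func main() {
	// In tests, replace the getter so BuildClusterConfig never needs to read
	// an in-cluster secret or service account token.
	util.KubeconfigGetterForCluster = func(c *federation_v1beta1.Cluster) clientcmd.KubeconfigGetter {
		return func() (*clientcmdapi.Config, error) {
			return clientcmdapi.NewConfig(), nil
		}
	}
	_ = util.BuildClusterConfig // the code under test then calls this as usual
}
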

View file

@ -0,0 +1,31 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
)
// Checks if cluster-independent, user provided data in two given ConfigMaps are equal. If in
// the future the ConfigMap structure is expanded then any field that is not populated
// by the api server should be included here.
func ConfigMapEquivalent(s1, s2 *api_v1.ConfigMap) bool {
return ObjectMetaEquivalent(s1.ObjectMeta, s2.ObjectMeta) &&
reflect.DeepEqual(s1.Data, s2.Data)
}

View file

@ -0,0 +1,183 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO: consider moving it to a more generic package.
package util
import (
"container/heap"
"time"
)
const (
// TODO: Investigate what capacity is right.
delayingDelivererUpdateChanCapacity = 1000
)
// DelayingDelivererItem is structure delivered by DelayingDeliverer to the
// target channel.
type DelayingDelivererItem struct {
// Key under which the value was added to deliverer.
Key string
// Value of the item.
Value interface{}
// When the item should be delivered.
DeliveryTime time.Time
}
type delivererHeap struct {
keyPosition map[string]int
data []*DelayingDelivererItem
}
// Functions required by container.Heap.
func (dh *delivererHeap) Len() int { return len(dh.data) }
func (dh *delivererHeap) Less(i, j int) bool {
return dh.data[i].DeliveryTime.Before(dh.data[j].DeliveryTime)
}
func (dh *delivererHeap) Swap(i, j int) {
dh.keyPosition[dh.data[i].Key] = j
dh.keyPosition[dh.data[j].Key] = i
dh.data[i], dh.data[j] = dh.data[j], dh.data[i]
}
func (dh *delivererHeap) Push(x interface{}) {
item := x.(*DelayingDelivererItem)
dh.data = append(dh.data, item)
dh.keyPosition[item.Key] = len(dh.data) - 1
}
func (dh *delivererHeap) Pop() interface{} {
n := len(dh.data)
item := dh.data[n-1]
dh.data = dh.data[:n-1]
delete(dh.keyPosition, item.Key)
return item
}
// A structure that pushes the items to the target channel at a given time.
type DelayingDeliverer struct {
// Channel to deliver the data when their time comes.
targetChannel chan *DelayingDelivererItem
// Store for data
heap *delivererHeap
// Channel to feed the main goroutine with updates.
updateChannel chan *DelayingDelivererItem
// To stop the main goroutine.
stopChannel chan struct{}
}
func NewDelayingDeliverer() *DelayingDeliverer {
return NewDelayingDelivererWithChannel(make(chan *DelayingDelivererItem, 100))
}
func NewDelayingDelivererWithChannel(targetChannel chan *DelayingDelivererItem) *DelayingDeliverer {
return &DelayingDeliverer{
targetChannel: targetChannel,
heap: &delivererHeap{
keyPosition: make(map[string]int),
data: make([]*DelayingDelivererItem, 0),
},
updateChannel: make(chan *DelayingDelivererItem, delayingDelivererUpdateChanCapacity),
stopChannel: make(chan struct{}),
}
}
// Delivers all items due at or before the given timestamp.
func (d *DelayingDeliverer) deliver(timestamp time.Time) {
for d.heap.Len() > 0 {
if timestamp.Before(d.heap.data[0].DeliveryTime) {
return
}
item := heap.Pop(d.heap).(*DelayingDelivererItem)
d.targetChannel <- item
}
}
func (d *DelayingDeliverer) run() {
for {
now := time.Now()
d.deliver(now)
nextWakeUp := now.Add(time.Hour)
if d.heap.Len() > 0 {
nextWakeUp = d.heap.data[0].DeliveryTime
}
sleepTime := nextWakeUp.Sub(now)
select {
case <-time.After(sleepTime):
break // just wake up and process the data
case item := <-d.updateChannel:
if position, found := d.heap.keyPosition[item.Key]; found {
if item.DeliveryTime.Before(d.heap.data[position].DeliveryTime) {
d.heap.data[position] = item
heap.Fix(d.heap, position)
}
// Ignore if later.
} else {
heap.Push(d.heap, item)
}
case <-d.stopChannel:
return
}
}
}
// Starts the DelayingDeliverer.
func (d *DelayingDeliverer) Start() {
go d.run()
}
// Stops the DelayingDeliverer. Undelivered items are discarded.
func (d *DelayingDeliverer) Stop() {
close(d.stopChannel)
}
// Delivers value at the given time.
func (d *DelayingDeliverer) DeliverAt(key string, value interface{}, deliveryTime time.Time) {
d.updateChannel <- &DelayingDelivererItem{
Key: key,
Value: value,
DeliveryTime: deliveryTime,
}
}
// Delivers value after the given delay.
func (d *DelayingDeliverer) DeliverAfter(key string, value interface{}, delay time.Duration) {
d.DeliverAt(key, value, time.Now().Add(delay))
}
// Gets target channel of the deliverer.
func (d *DelayingDeliverer) GetTargetChannel() chan *DelayingDelivererItem {
return d.targetChannel
}
// Starts the DelayingDeliverer with a handler listening on the target channel.
func (d *DelayingDeliverer) StartWithHandler(handler func(*DelayingDelivererItem)) {
go func() {
for {
select {
case item := <-d.targetChannel:
handler(item)
case <-d.stopChannel:
return
}
}
}()
d.Start()
}
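
A small usage sketch of the deliverer above; the key, value, and delay are arbitrary examples.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/federation/pkg/federation-controller/util"
)

func main() {
	d := util.NewDelayingDeliverer()
	// The handler drains the target channel; here it just prints the key.
	d.StartWithHandler(func(item *util.DelayingDelivererItem) {
		fmt.Printf("delivered %s at %v\n", item.Key, item.DeliveryTime)
	})
	defer d.Stop()

	// Schedule a (hypothetical) service key for redelivery half a second from now.
	d.DeliverAfter("default/my-service", nil, 500*time.Millisecond)
	time.Sleep(time.Second)
}
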

View file

@ -0,0 +1,63 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestDelayingDeliverer(t *testing.T) {
targetChannel := make(chan *DelayingDelivererItem)
now := time.Now()
d := NewDelayingDelivererWithChannel(targetChannel)
d.Start()
defer d.Stop()
startupDelay := time.Second
d.DeliverAt("a", "aaa", now.Add(startupDelay+2*time.Millisecond))
d.DeliverAt("b", "bbb", now.Add(startupDelay+3*time.Millisecond))
d.DeliverAt("c", "ccc", now.Add(startupDelay+1*time.Millisecond))
d.DeliverAt("e", "eee", now.Add(time.Hour))
d.DeliverAt("e", "eee", now)
d.DeliverAt("d", "ddd", now.Add(time.Hour))
i0 := <-targetChannel
assert.Equal(t, "e", i0.Key)
assert.Equal(t, "eee", i0.Value.(string))
assert.Equal(t, now, i0.DeliveryTime)
i1 := <-targetChannel
received1 := time.Now()
assert.True(t, received1.Sub(now).Nanoseconds() > startupDelay.Nanoseconds())
assert.Equal(t, "c", i1.Key)
i2 := <-targetChannel
assert.Equal(t, "a", i2.Key)
i3 := <-targetChannel
assert.Equal(t, "b", i3.Key)
select {
case <-targetChannel:
t.Fatalf("Nothing should be received")
case <-time.After(time.Second):
// Ok. Expected
}
}

View file

@ -0,0 +1,25 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["deletion_helper.go"],
tags = ["automanaged"],
deps = [
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/runtime:go_default_library",
"//vendor:github.com/golang/glog",
],
)

View file

@ -0,0 +1,190 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package deletionhelper helps federation controllers delete federated resources from
// underlying clusters when the resource is deleted from the federation control
// plane.
package deletionhelper
import (
"fmt"
"strings"
"time"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime"
"github.com/golang/glog"
)
const (
// Add this finalizer to a federation resource if the resource should be
// deleted from all underlying clusters before being deleted from
// federation control plane.
// This is ignored if FinalizerOrphan is also present on the resource.
// In that case, both finalizers are removed from the resource and the
// resource is deleted from federation control plane without affecting
// the underlying clusters.
FinalizerDeleteFromUnderlyingClusters string = "federation.kubernetes.io/delete-from-underlying-clusters"
)
type HasFinalizerFunc func(runtime.Object, string) bool
type RemoveFinalizerFunc func(runtime.Object, string) (runtime.Object, error)
type AddFinalizerFunc func(runtime.Object, string) (runtime.Object, error)
type ObjNameFunc func(runtime.Object) string
type DeletionHelper struct {
hasFinalizerFunc HasFinalizerFunc
removeFinalizerFunc RemoveFinalizerFunc
addFinalizerFunc AddFinalizerFunc
objNameFunc ObjNameFunc
updateTimeout time.Duration
eventRecorder record.EventRecorder
informer util.FederatedInformer
updater util.FederatedUpdater
}
func NewDeletionHelper(
hasFinalizerFunc HasFinalizerFunc, removeFinalizerFunc RemoveFinalizerFunc,
addFinalizerFunc AddFinalizerFunc, objNameFunc ObjNameFunc,
updateTimeout time.Duration, eventRecorder record.EventRecorder,
informer util.FederatedInformer,
updater util.FederatedUpdater) *DeletionHelper {
return &DeletionHelper{
hasFinalizerFunc: hasFinalizerFunc,
removeFinalizerFunc: removeFinalizerFunc,
addFinalizerFunc: addFinalizerFunc,
objNameFunc: objNameFunc,
updateTimeout: updateTimeout,
eventRecorder: eventRecorder,
informer: informer,
updater: updater,
}
}
// Ensures that the given object has both FinalizerDeleteFromUnderlyingClusters
// and FinalizerOrphan finalizers.
// We do this so that the controller is always notified when a federation resource is deleted.
// If the user deletes the resource with nil DeleteOptions or
// DeleteOptions.OrphanDependents = true then the apiserver removes the orphan finalizer
// and the deletion helper does a cascading deletion.
// Otherwise, deletion helper just removes the federation resource and orphans
// the corresponding resources in underlying clusters.
// This method should be called before creating objects in underlying clusters.
func (dh *DeletionHelper) EnsureFinalizers(obj runtime.Object) (
runtime.Object, error) {
if !dh.hasFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) {
glog.V(2).Infof("Adding finalizer %s to %s", FinalizerDeleteFromUnderlyingClusters, dh.objNameFunc(obj))
var err error
obj, err = dh.addFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters)
if err != nil {
return obj, err
}
}
if !dh.hasFinalizerFunc(obj, api_v1.FinalizerOrphan) {
glog.V(2).Infof("Adding finalizer %s to %s", api_v1.FinalizerOrphan, dh.objNameFunc(obj))
var err error
obj, err = dh.addFinalizerFunc(obj, api_v1.FinalizerOrphan)
if err != nil {
return obj, err
}
}
return obj, nil
}
// Deletes the resources corresponding to the given federated resource from
// all underlying clusters, unless it has the FinalizerOrphan finalizer.
// Removes FinalizerOrphan and FinalizerDeleteFromUnderlyingClusters finalizers
// when done.
// Callers are expected to keep calling this (with appropriate backoff) until
// it succeeds.
func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) (
runtime.Object, error) {
objName := dh.objNameFunc(obj)
glog.V(2).Infof("Handling deletion of federated dependents for object: %s", objName)
if !dh.hasFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) {
glog.V(2).Infof("obj does not have %s finalizer. Nothing to do", FinalizerDeleteFromUnderlyingClusters)
return obj, nil
}
hasOrphanFinalizer := dh.hasFinalizerFunc(obj, api_v1.FinalizerOrphan)
if hasOrphanFinalizer {
glog.V(2).Infof("Found finalizer orphan. Nothing to do, just remove the finalizer")
// If the obj has FinalizerOrphan finalizer, then we need to orphan the
// corresponding objects in underlying clusters.
// Just remove both the finalizers in that case.
obj, err := dh.removeFinalizerFunc(obj, api_v1.FinalizerOrphan)
if err != nil {
return obj, err
}
return dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters)
}
glog.V(2).Infof("Deleting obj %s from underlying clusters", objName)
// Else, we need to delete the obj from all underlying clusters.
unreadyClusters, err := dh.informer.GetUnreadyClusters()
if err != nil {
return nil, fmt.Errorf("failed to get a list of unready clusters: %v", err)
}
// TODO: Handle the case when cluster resource is watched after this is executed.
// This can happen if a namespace is deleted before its creation had been
// observed in all underlying clusters.
storeKey := dh.informer.GetTargetStore().GetKeyFor(obj)
clusterNsObjs, err := dh.informer.GetTargetStore().GetFromAllClusters(storeKey)
glog.V(3).Infof("Found %d objects in underlying clusters", len(clusterNsObjs))
if err != nil {
return nil, fmt.Errorf("failed to get object %s from underlying clusters: %v", objName, err)
}
operations := make([]util.FederatedOperation, 0)
for _, clusterNsObj := range clusterNsObjs {
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeDelete,
ClusterName: clusterNsObj.ClusterName,
Obj: clusterNsObj.Object.(runtime.Object),
})
}
err = dh.updater.UpdateWithOnError(operations, dh.updateTimeout, func(op util.FederatedOperation, operror error) {
objName := dh.objNameFunc(op.Obj)
dh.eventRecorder.Eventf(obj, api.EventTypeNormal, "DeleteInClusterFailed",
"Failed to delete obj %s in cluster %s: %v", objName, op.ClusterName, operror)
})
if err != nil {
return nil, fmt.Errorf("failed to execute updates for obj %s: %v", objName, err)
}
if len(operations) > 0 {
// We have deleted a bunch of resources.
// Wait for the store to observe all the deletions.
var clusterNames []string
for _, op := range operations {
clusterNames = append(clusterNames, op.ClusterName)
}
return nil, fmt.Errorf("waiting for object %s to be deleted from clusters: %s", objName, strings.Join(clusterNames, ", "))
}
// We have now deleted the object from all *ready* clusters.
// But still need to wait for clusters that are not ready to ensure that
// the object has been deleted from *all* clusters.
if len(unreadyClusters) != 0 {
var clusterNames []string
for _, cluster := range unreadyClusters {
clusterNames = append(clusterNames, cluster.Name)
}
return nil, fmt.Errorf("waiting for clusters %s to become ready to verify that obj %s has been deleted", strings.Join(clusterNames, ", "), objName)
}
// All done. Just remove the finalizer.
return dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters)
}
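
A sketch of the finalizer callbacks a controller might hand to NewDeletionHelper, written against *api_v1.Secret purely for illustration; controllers in this commit supply equivalents for their own resource types, and a real addFinalizerFunc would also persist the change to the federation apiserver.

package main

import (
	api_v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/runtime"
)

// hasFinalizer reports whether the object's metadata carries the finalizer.
func hasFinalizer(obj runtime.Object, finalizer string) bool {
	secret := obj.(*api_v1.Secret)
	for _, f := range secret.ObjectMeta.Finalizers {
		if f == finalizer {
			return true
		}
	}
	return false
}

// addFinalizer appends the finalizer to the object's metadata.
func addFinalizer(obj runtime.Object, finalizer string) (runtime.Object, error) {
	secret := obj.(*api_v1.Secret)
	secret.ObjectMeta.Finalizers = append(secret.ObjectMeta.Finalizers, finalizer)
	return secret, nil
}

// removeFinalizer strips the finalizer from the object's metadata.
func removeFinalizer(obj runtime.Object, finalizer string) (runtime.Object, error) {
	secret := obj.(*api_v1.Secret)
	kept := []string{}
	for _, f := range secret.ObjectMeta.Finalizers {
		if f != finalizer {
			kept = append(kept, f)
		}
	}
	secret.ObjectMeta.Finalizers = kept
	return secret, nil
}

func main() {}
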

View file

@ -0,0 +1,75 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
extensions_v1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deputils "k8s.io/kubernetes/pkg/controller/deployment/util"
)
// Checks if cluster-independent, user provided data in two given Deployments are equal.
// This function assumes that revisions are not kept in sync across the clusters.
func DeploymentEquivalent(a, b *extensions_v1.Deployment) bool {
if a.Name != b.Name {
return false
}
if a.Namespace != b.Namespace {
return false
}
if !reflect.DeepEqual(a.Labels, b.Labels) && (len(a.Labels) != 0 || len(b.Labels) != 0) {
return false
}
hasKeysAndVals := func(x, y map[string]string) bool {
if x == nil {
x = map[string]string{}
}
if y == nil {
y = map[string]string{}
}
for k, v := range x {
if k == deputils.RevisionAnnotation {
continue
}
v2, found := y[k]
if !found || v != v2 {
return false
}
}
return true
}
return hasKeysAndVals(a.Annotations, b.Annotations) &&
hasKeysAndVals(b.Annotations, a.Annotations) &&
reflect.DeepEqual(a.Spec, b.Spec)
}
// Copies object meta for Deployment, skipping revision information.
func DeepCopyDeploymentObjectMeta(meta api_v1.ObjectMeta) api_v1.ObjectMeta {
meta = DeepCopyRelevantObjectMeta(meta)
delete(meta.Annotations, deputils.RevisionAnnotation)
return meta
}
// Copies a Deployment, skipping revision information in its object meta.
func DeepCopyDeployment(a *extensions_v1.Deployment) *extensions_v1.Deployment {
return &extensions_v1.Deployment{
ObjectMeta: DeepCopyDeploymentObjectMeta(a.ObjectMeta),
Spec: DeepCopyApiTypeOrPanic(a.Spec).(extensions_v1.DeploymentSpec),
}
}

View file

@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deputils "k8s.io/kubernetes/pkg/controller/deployment/util"
"github.com/stretchr/testify/assert"
)
func TestDeploymentEquivalent(t *testing.T) {
d1 := newDeployment()
d2 := newDeployment()
d2.Annotations = make(map[string]string)
d3 := newDeployment()
d3.Annotations = map[string]string{"a": "b"}
d4 := newDeployment()
d4.Annotations = map[string]string{deputils.RevisionAnnotation: "9"}
assert.True(t, DeploymentEquivalent(d1, d2))
assert.True(t, DeploymentEquivalent(d1, d2))
assert.True(t, DeploymentEquivalent(d1, d4))
assert.True(t, DeploymentEquivalent(d4, d1))
assert.False(t, DeploymentEquivalent(d3, d4))
assert.False(t, DeploymentEquivalent(d3, d1))
assert.True(t, DeploymentEquivalent(d3, d3))
}
func TestDeploymentCopy(t *testing.T) {
d1 := newDeployment()
d1.Annotations = map[string]string{deputils.RevisionAnnotation: "9", "a": "b"}
d2 := DeepCopyDeployment(d1)
assert.True(t, DeploymentEquivalent(d1, d2))
assert.Contains(t, d2.Annotations, "a")
assert.NotContains(t, d2.Annotations, deputils.RevisionAnnotation)
}
func newDeployment() *extensionsv1.Deployment {
replicas := int32(5)
return &extensionsv1.Deployment{
ObjectMeta: apiv1.ObjectMeta{
Name: "wrr",
Namespace: apiv1.NamespaceDefault,
SelfLink: "/api/v1/namespaces/default/deployments/name123",
},
Spec: extensionsv1.DeploymentSpec{
Replicas: &replicas,
},
}
}

View file

@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["eventsink.go"],
tags = ["automanaged"],
deps = [
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["eventsink_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/client/clientset_generated/federation_release_1_5/fake:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

View file

@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eventsink
import (
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
api "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
)
// Implements k8s.io/kubernetes/pkg/client/record.EventSink.
type FederatedEventSink struct {
clientset fedclientset.Interface
}
// To check if all required functions are implemented.
var _ record.EventSink = &FederatedEventSink{}
func NewFederatedEventSink(clientset fedclientset.Interface) *FederatedEventSink {
return &FederatedEventSink{
clientset: clientset,
}
}
func (fes *FederatedEventSink) Create(event *api_v1.Event) (*api_v1.Event, error) {
return fes.clientset.Core().Events(event.Namespace).Create(event)
}
func (fes *FederatedEventSink) Update(event *api_v1.Event) (*api_v1.Event, error) {
return fes.clientset.Core().Events(event.Namespace).Update(event)
}
func (fes *FederatedEventSink) Patch(event *api_v1.Event, data []byte) (*api_v1.Event, error) {
return fes.clientset.Core().Events(event.Namespace).Patch(event.Name, api.StrategicMergePatchType, data)
}
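A minimal usage sketch, not part of this commit: the sink is normally handed to the event recording machinery, but it can also be driven directly as below. fedClient is an assumed federation clientset built elsewhere; the fedclientset and api_v1 aliases are the ones already imported in this file.

func recordExample(fedClient fedclientset.Interface) error {
    sink := NewFederatedEventSink(fedClient)
    event := &api_v1.Event{
        ObjectMeta: api_v1.ObjectMeta{Name: "example-event", Namespace: "default"},
        Reason:     "Example",
        Message:    "recorded against the federation control plane",
    }
    _, err := sink.Create(event)
    return err
}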

View file

@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eventsink
import (
"testing"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert"
)
func TestEventSink(t *testing.T) {
fakeFederationClient := &fakefedclientset.Clientset{}
createdChan := make(chan runtime.Object, 100)
fakeFederationClient.AddReactor("create", "events", func(action core.Action) (bool, runtime.Object, error) {
createAction := action.(core.CreateAction)
obj := createAction.GetObject()
createdChan <- obj
return true, obj, nil
})
updateChan := make(chan runtime.Object, 100)
fakeFederationClient.AddReactor("update", "events", func(action core.Action) (bool, runtime.Object, error) {
updateAction := action.(core.UpdateAction)
obj := updateAction.GetObject()
updateChan <- obj
return true, obj, nil
})
event := apiv1.Event{
ObjectMeta: apiv1.ObjectMeta{
Name: "bzium",
Namespace: "ns",
},
}
sink := NewFederatedEventSink(fakeFederationClient)
eventUpdated, err := sink.Create(&event)
assert.NoError(t, err)
eventV1 := GetObjectFromChan(createdChan).(*apiv1.Event)
assert.NotNil(t, eventV1)
// Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name)
assert.Equal(t, event.Name, eventUpdated.Name)
eventUpdated, err = sink.Update(&event)
assert.NoError(t, err)
eventV1 = GetObjectFromChan(updateChan).(*apiv1.Event)
assert.NotNil(t, eventV1)
// Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name)
assert.Equal(t, event.Name, eventUpdated.Name)
}

File diff suppressed because it is too large

View file

@ -0,0 +1,149 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefederationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
"github.com/stretchr/testify/assert"
)
// Basic test for Federated Informer. Checks whether the subinformers are added and deleted
// when the corresponding cluster entries appear and disappear from etcd.
func TestFederatedInformer(t *testing.T) {
fakeFederationClient := &fakefederationclientset.Clientset{}
// Add a single cluster to federation and remove it when needed.
cluster := federationapi.Cluster{
ObjectMeta: apiv1.ObjectMeta{
Name: "mycluster",
},
Status: federationapi.ClusterStatus{
Conditions: []federationapi.ClusterCondition{
{Type: federationapi.ClusterReady, Status: apiv1.ConditionTrue},
},
},
}
fakeFederationClient.AddReactor("list", "clusters", func(action core.Action) (bool, runtime.Object, error) {
return true, &federationapi.ClusterList{Items: []federationapi.Cluster{cluster}}, nil
})
deleteChan := make(chan struct{})
fakeFederationClient.AddWatchReactor("clusters", func(action core.Action) (bool, watch.Interface, error) {
fakeWatch := watch.NewFake()
go func() {
<-deleteChan
fakeWatch.Delete(&cluster)
}()
return true, fakeWatch, nil
})
fakeKubeClient := &fakekubeclientset.Clientset{}
// There is a single service ns1/s1 in cluster mycluster.
service := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
}
fakeKubeClient.AddReactor("list", "services", func(action core.Action) (bool, runtime.Object, error) {
return true, &apiv1.ServiceList{Items: []apiv1.Service{service}}, nil
})
fakeKubeClient.AddWatchReactor("services", func(action core.Action) (bool, watch.Interface, error) {
return true, watch.NewFake(), nil
})
targetInformerFactory := func(cluster *federationapi.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return clientset.Core().Services(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.Service{},
10*time.Second,
cache.ResourceEventHandlerFuncs{})
}
addedClusters := make(chan string, 1)
deletedClusters := make(chan string, 1)
lifecycle := ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
addedClusters <- cluster.Name
close(addedClusters)
},
ClusterUnavailable: func(cluster *federationapi.Cluster, _ []interface{}) {
deletedClusters <- cluster.Name
close(deletedClusters)
},
}
informer := NewFederatedInformer(fakeFederationClient, targetInformerFactory, &lifecycle).(*federatedInformerImpl)
informer.clientFactory = func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
return fakeKubeClient, nil
}
assert.NotNil(t, informer)
informer.Start()
// Wait until mycluster is synced.
for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{&cluster}) {
time.Sleep(time.Millisecond * 100)
}
readyClusters, err := informer.GetReadyClusters()
assert.NoError(t, err)
assert.Contains(t, readyClusters, &cluster)
serviceList, err := informer.GetTargetStore().List()
assert.NoError(t, err)
federatedService := FederatedObject{ClusterName: "mycluster", Object: &service}
assert.Contains(t, serviceList, federatedService)
service1, found, err := informer.GetTargetStore().GetByKey("mycluster", "ns1/s1")
assert.NoError(t, err)
assert.True(t, found)
assert.EqualValues(t, &service, service1)
assert.Equal(t, "mycluster", <-addedClusters)
// All checked, let's delete the cluster.
deleteChan <- struct{}{}
for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{}) {
time.Sleep(time.Millisecond * 100)
}
readyClusters, err = informer.GetReadyClusters()
assert.NoError(t, err)
assert.Empty(t, readyClusters)
serviceList, err = informer.GetTargetStore().List()
assert.NoError(t, err)
assert.Empty(t, serviceList)
assert.Equal(t, "mycluster", <-deletedClusters)
// Test complete.
informer.Stop()
}

View file

@ -0,0 +1,122 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"time"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
)
// Type of the operation that can be executed in federated clusters.
type FederatedOperationType string
const (
OperationTypeAdd = "add"
OperationTypeUpdate = "update"
OperationTypeDelete = "delete"
)
// FederatedOperation definition contains type (add/update/delete) and the object itself.
type FederatedOperation struct {
Type FederatedOperationType
ClusterName string
Obj pkgruntime.Object
}
// A helper that executes the given set of updates on federation, in parallel.
type FederatedUpdater interface {
// Executes the given set of operations within the specified timeout.
// Timeout is best-effort. There is no guarantee that the underlying operations are
// stopped when it is reached. However, the function will return after the timeout
// with a non-nil error.
Update([]FederatedOperation, time.Duration) error
UpdateWithOnError([]FederatedOperation, time.Duration, func(FederatedOperation, error)) error
}
// A function that executes some operation using the passed client and object.
type FederatedOperationHandler func(kubeclientset.Interface, pkgruntime.Object) error
type federatedUpdaterImpl struct {
federation FederationView
addFunction FederatedOperationHandler
updateFunction FederatedOperationHandler
deleteFunction FederatedOperationHandler
}
func NewFederatedUpdater(federation FederationView, add, update, del FederatedOperationHandler) FederatedUpdater {
return &federatedUpdaterImpl{
federation: federation,
addFunction: add,
updateFunction: update,
deleteFunction: del,
}
}
func (fu *federatedUpdaterImpl) Update(ops []FederatedOperation, timeout time.Duration) error {
return fu.UpdateWithOnError(ops, timeout, nil)
}
func (fu *federatedUpdaterImpl) UpdateWithOnError(ops []FederatedOperation, timeout time.Duration, onError func(FederatedOperation, error)) error {
done := make(chan error, len(ops))
for _, op := range ops {
go func(op FederatedOperation) {
clusterName := op.ClusterName
// TODO: Ensure that the clientset has reasonable timeout.
clientset, err := fu.federation.GetClientsetForCluster(clusterName)
if err != nil {
done <- err
return
}
switch op.Type {
case OperationTypeAdd:
err = fu.addFunction(clientset, op.Obj)
case OperationTypeUpdate:
err = fu.updateFunction(clientset, op.Obj)
case OperationTypeDelete:
err = fu.deleteFunction(clientset, op.Obj)
}
if err != nil && onError != nil {
onError(op, err)
}
done <- err
}(op)
}
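// Collect one result per operation, but stop waiting as soon as the overall deadline passes.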
start := time.Now()
for i := 0; i < len(ops); i++ {
now := time.Now()
if !now.Before(start.Add(timeout)) {
return fmt.Errorf("failed to finish all operations in %v", timeout)
}
select {
case err := <-done:
if err != nil {
return err
}
case <-time.After(start.Add(timeout).Sub(now)):
return fmt.Errorf("failed to finish all operations in %v", timeout)
}
}
// All operations finished in time.
return nil
}
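A hypothetical usage sketch, not part of this commit: a controller builds one updater per object type with add/update/delete handlers and then submits a batch of per-cluster operations under a single deadline. The pushSecret helper, the cluster names and the apiv1 import are assumptions; federation is any FederationView, for example a FederatedInformer.

import (
    "time"

    apiv1 "k8s.io/kubernetes/pkg/api/v1"
    kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    pkgruntime "k8s.io/kubernetes/pkg/runtime"
)

func pushSecret(federation FederationView, secret *apiv1.Secret) error {
    updater := NewFederatedUpdater(federation,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            s := obj.(*apiv1.Secret)
            _, err := client.Core().Secrets(s.Namespace).Create(s)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            s := obj.(*apiv1.Secret)
            _, err := client.Core().Secrets(s.Namespace).Update(s)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            s := obj.(*apiv1.Secret)
            return client.Core().Secrets(s.Namespace).Delete(s.Name, nil)
        })
    // One FederatedOperation per (cluster, action) pair; the whole batch shares one timeout.
    return updater.Update([]FederatedOperation{
        {Type: OperationTypeAdd, ClusterName: "cluster-a", Obj: secret},
        {Type: OperationTypeUpdate, ClusterName: "cluster-b", Obj: secret},
    }, 30*time.Second)
}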

View file

@ -0,0 +1,148 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"testing"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert"
)
// Fake federation view.
type fakeFederationView struct {
}
// Verify that fakeFederationView implements FederationView interface
var _ FederationView = &fakeFederationView{}
func (f *fakeFederationView) GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error) {
return &fakekubeclientset.Clientset{}, nil
}
func (f *fakeFederationView) GetReadyClusters() ([]*federationapi.Cluster, error) {
return []*federationapi.Cluster{}, nil
}
func (f *fakeFederationView) GetUnreadyClusters() ([]*federationapi.Cluster, error) {
return []*federationapi.Cluster{}, nil
}
func (f *fakeFederationView) GetReadyCluster(name string) (*federationapi.Cluster, bool, error) {
return nil, false, nil
}
func (f *fakeFederationView) ClustersSynced() bool {
return true
}
func TestFederatedUpdaterOK(t *testing.T) {
addChan := make(chan string, 5)
updateChan := make(chan string, 5)
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*apiv1.Service)
addChan <- service.Name
return nil
},
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*apiv1.Service)
updateChan <- service.Name
return nil
},
noop)
err := updater.Update([]FederatedOperation{
{
Type: OperationTypeAdd,
Obj: makeService("A", "s1"),
},
{
Type: OperationTypeUpdate,
Obj: makeService("B", "s2"),
},
}, time.Minute)
assert.NoError(t, err)
add := <-addChan
update := <-updateChan
assert.Equal(t, "s1", add)
assert.Equal(t, "s2", update)
}
func TestFederatedUpdaterError(t *testing.T) {
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
return fmt.Errorf("boom")
}, noop, noop)
err := updater.Update([]FederatedOperation{
{
Type: OperationTypeAdd,
Obj: makeService("A", "s1"),
},
{
Type: OperationTypeUpdate,
Obj: makeService("B", "s1"),
},
}, time.Minute)
assert.Error(t, err)
}
func TestFederatedUpdaterTimeout(t *testing.T) {
start := time.Now()
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
time.Sleep(time.Minute)
return nil
},
noop, noop)
err := updater.Update([]FederatedOperation{
{
Type: OperationTypeAdd,
Obj: makeService("A", "s1"),
},
{
Type: OperationTypeUpdate,
Obj: makeService("B", "s1"),
},
}, time.Second)
end := time.Now()
assert.Error(t, err)
assert.True(t, start.Add(10*time.Second).After(end))
}
func makeService(cluster, name string) *apiv1.Service {
return &apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: name,
},
}
}
func noop(_ kubeclientset.Interface, _ pkgruntime.Object) error {
return nil
}

View file

@ -0,0 +1,79 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"reflect"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
)
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on all object changes.
func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj)
},
AddFunc: func(cur interface{}) {
curObj := cur.(pkgruntime.Object)
triggerFunc(curObj)
},
UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkgruntime.Object)
if !reflect.DeepEqual(old, cur) {
triggerFunc(curObj)
}
},
}
}
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on object add and delete, and on spec or object meta changes on update.
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
getFieldOrPanic := func(obj interface{}, fieldName string) interface{} {
val := reflect.ValueOf(obj).Elem().FieldByName(fieldName)
if val.IsValid() {
return val.Interface()
} else {
panic(fmt.Errorf("field not found: %s", fieldName))
}
}
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj)
},
AddFunc: func(cur interface{}) {
curObj := cur.(pkgruntime.Object)
triggerFunc(curObj)
},
UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkgruntime.Object)
oldMeta := getFieldOrPanic(old, "ObjectMeta").(apiv1.ObjectMeta)
curMeta := getFieldOrPanic(cur, "ObjectMeta").(apiv1.ObjectMeta)
if !ObjectMetaEquivalent(oldMeta, curMeta) ||
!reflect.DeepEqual(getFieldOrPanic(old, "Spec"), getFieldOrPanic(cur, "Spec")) {
triggerFunc(curObj)
}
},
}
}
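A sketch, not part of this commit, of how these handler factories are typically plugged into an informer: every add and delete of a Service, and every update that actually changes the object, invokes the supplied reconcile callback. It mirrors the target informer factory used in the federated informer test earlier in this change; newServiceInformer and reconcile are illustrative names, and the import block lists the assumed packages.

import (
    "time"

    apiv1 "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    pkgruntime "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

func newServiceInformer(client kubeclientset.Interface, reconcile func(pkgruntime.Object)) (cache.Store, cache.ControllerInterface) {
    return cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                return client.Core().Services(apiv1.NamespaceAll).List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return client.Core().Services(apiv1.NamespaceAll).Watch(options)
            },
        },
        &apiv1.Service{},
        10*time.Minute, // resync period of the underlying reflector
        NewTriggerOnAllChanges(reconcile),
    )
}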

View file

@ -0,0 +1,99 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert"
)
func TestHandlers(t *testing.T) {
// There is a single service ns1/s1 in cluster mycluster.
service := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
}
service2 := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
Annotations: map[string]string{
"A": "B",
},
},
}
triggerChan := make(chan struct{}, 1)
triggered := func() bool {
select {
case <-triggerChan:
return true
default:
return false
}
}
trigger := NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
triggerChan <- struct{}{}
})
trigger.OnAdd(&service)
assert.True(t, triggered())
trigger.OnDelete(&service)
assert.True(t, triggered())
trigger.OnUpdate(&service, &service)
assert.False(t, triggered())
trigger.OnUpdate(&service, &service2)
assert.True(t, triggered())
trigger2 := NewTriggerOnMetaAndSpecChanges(
func(obj pkgruntime.Object) {
triggerChan <- struct{}{}
},
)
trigger2.OnAdd(&service)
assert.True(t, triggered())
trigger2.OnDelete(&service)
assert.True(t, triggered())
trigger2.OnUpdate(&service, &service)
assert.False(t, triggered())
trigger2.OnUpdate(&service, &service2)
assert.True(t, triggered())
service3 := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
Status: apiv1.ServiceStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{{
Hostname: "A",
}},
},
},
}
trigger2.OnUpdate(&service, &service3)
assert.False(t, triggered())
}

View file

@ -0,0 +1,94 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
)
// Copies cluster-independent, user provided data from the given ObjectMeta struct. If in
// the future the ObjectMeta structure is expanded then any field that is not populated
// by the api server should be included here.
func copyObjectMeta(obj api_v1.ObjectMeta) api_v1.ObjectMeta {
return api_v1.ObjectMeta{
Name: obj.Name,
Namespace: obj.Namespace,
Labels: obj.Labels,
Annotations: obj.Annotations,
}
}
// Deep copies cluster-independent, user provided data from the given ObjectMeta struct. If in
// the future the ObjectMeta structure is expanded then any field that is not populated
// by the api server should be included here.
func DeepCopyRelevantObjectMeta(obj api_v1.ObjectMeta) api_v1.ObjectMeta {
copyMeta := copyObjectMeta(obj)
if obj.Labels != nil {
copyMeta.Labels = make(map[string]string)
for key, val := range obj.Labels {
copyMeta.Labels[key] = val
}
}
if obj.Annotations != nil {
copyMeta.Annotations = make(map[string]string)
for key, val := range obj.Annotations {
copyMeta.Annotations[key] = val
}
}
return copyMeta
}
// Checks if cluster-independent, user provided data in two given ObjectMeta are equal. If in
// the future the ObjectMeta structure is expanded then any field that is not populated
// by the api server should be included here.
func ObjectMetaEquivalent(a, b api_v1.ObjectMeta) bool {
if a.Name != b.Name {
return false
}
if a.Namespace != b.Namespace {
return false
}
if !reflect.DeepEqual(a.Labels, b.Labels) && (len(a.Labels) != 0 || len(b.Labels) != 0) {
return false
}
if !reflect.DeepEqual(a.Annotations, b.Annotations) && (len(a.Annotations) != 0 || len(b.Annotations) != 0) {
return false
}
return true
}
// Checks if cluster-independent, user provided data in ObjectMeta and Spec in two given top
// level api objects are equivalent.
func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool {
objectMetaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(api_v1.ObjectMeta)
objectMetaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(api_v1.ObjectMeta)
specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface()
specB := reflect.ValueOf(b).Elem().FieldByName("Spec").Interface()
return ObjectMetaEquivalent(objectMetaA, objectMetaB) && reflect.DeepEqual(specA, specB)
}
func DeepCopyApiTypeOrPanic(item interface{}) interface{} {
result, err := conversion.NewCloner().DeepCopy(item)
if err != nil {
panic(err)
}
return result
}

View file

@ -0,0 +1,116 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"github.com/stretchr/testify/assert"
)
func TestObjectMeta(t *testing.T) {
o1 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
UID: "1231231412",
ResourceVersion: "999",
}
o2 := copyObjectMeta(o1)
o3 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
UID: "1231231412",
Annotations: map[string]string{"A": "B"},
}
o4 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
UID: "1231255531412",
Annotations: map[string]string{"A": "B"},
}
o5 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
ResourceVersion: "1231231412",
Annotations: map[string]string{"A": "B"},
}
o6 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
ResourceVersion: "1231255531412",
Annotations: map[string]string{"A": "B"},
}
o7 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
ResourceVersion: "1231255531412",
Annotations: map[string]string{},
Labels: map[string]string{},
}
o8 := api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
ResourceVersion: "1231255531412",
}
assert.Equal(t, 0, len(o2.UID))
assert.Equal(t, 0, len(o2.ResourceVersion))
assert.Equal(t, o1.Name, o2.Name)
assert.True(t, ObjectMetaEquivalent(o1, o2))
assert.False(t, ObjectMetaEquivalent(o1, o3))
assert.True(t, ObjectMetaEquivalent(o3, o4))
assert.True(t, ObjectMetaEquivalent(o5, o6))
assert.True(t, ObjectMetaEquivalent(o3, o5))
assert.True(t, ObjectMetaEquivalent(o7, o8))
assert.True(t, ObjectMetaEquivalent(o8, o7))
}
func TestObjectMetaAndSpec(t *testing.T) {
s1 := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
Spec: api_v1.ServiceSpec{
ExternalName: "Service1",
},
}
s1b := s1
s2 := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s2",
},
Spec: api_v1.ServiceSpec{
ExternalName: "Service1",
},
}
s3 := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
Spec: api_v1.ServiceSpec{
ExternalName: "Service2",
},
}
assert.True(t, ObjectMetaAndSpecEquivalent(&s1, &s1b))
assert.False(t, ObjectMetaAndSpecEquivalent(&s1, &s2))
assert.False(t, ObjectMetaAndSpecEquivalent(&s1, &s3))
assert.False(t, ObjectMetaAndSpecEquivalent(&s2, &s3))
}

View file

@ -0,0 +1,29 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["planner.go"],
tags = ["automanaged"],
deps = ["//federation/apis/federation:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["planner_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

View file

@ -0,0 +1,238 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planner
import (
"hash/fnv"
"sort"
fedapi "k8s.io/kubernetes/federation/apis/federation"
)
// Planner decides how many out of the given replicas should be placed in each of the
// federated clusters.
type Planner struct {
preferences *fedapi.FederatedReplicaSetPreferences
}
type namedClusterReplicaSetPreferences struct {
clusterName string
hash uint32
fedapi.ClusterReplicaSetPreferences
}
type byWeight []*namedClusterReplicaSetPreferences
func (a byWeight) Len() int { return len(a) }
func (a byWeight) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Preferences are sorted by decreasing weight and increasing hash (built on top of cluster name and rs name).
// Sorting by hash avoids always assigning a single-replica rs to the alphabetically smallest cluster.
func (a byWeight) Less(i, j int) bool {
return (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].hash < a[j].hash)
}
func NewPlanner(preferences *fedapi.FederatedReplicaSetPreferences) *Planner {
return &Planner{
preferences: preferences,
}
}
// Distribute the desired number of replicas among the given clusters according to the planner preferences.
// The function tries its best to assign each cluster the preferred number of replicas, however if
// the sum of MinReplicas for all clusters is bigger than replicasToDistribute then some clusters will not
// have all of the replicas assigned. In such a case a cluster with a higher weight has priority over
// a cluster with a lower weight (or with a lexicographically smaller name in case of a draw).
// It can also use the current replica count and estimated capacity to provide better planning and
// adhere to the rebalance policy. To avoid prioritizing clusters with lexicographically smaller names,
// a semi-random string (like the replica set name) can be provided.
// Two maps are returned:
// * a map with the number of replicas that can be run in each cluster.
// * a map with the number of extra replicas that would be nice to schedule in each cluster so that,
// if by chance they are scheduled, we will be closer to the desired replica layout.
func (p *Planner) Plan(replicasToDistribute int64, availableClusters []string, currentReplicaCount map[string]int64,
estimatedCapacity map[string]int64, replicaSetKey string) (map[string]int64, map[string]int64) {
preferences := make([]*namedClusterReplicaSetPreferences, 0, len(availableClusters))
plan := make(map[string]int64, len(preferences))
overflow := make(map[string]int64, len(preferences))
named := func(name string, pref fedapi.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences {
// Seems to work better than adler for our case.
hasher := fnv.New32()
hasher.Write([]byte(name))
hasher.Write([]byte(replicaSetKey))
return &namedClusterReplicaSetPreferences{
clusterName: name,
hash: hasher.Sum32(),
ClusterReplicaSetPreferences: pref,
}
}
for _, cluster := range availableClusters {
if localRSP, found := p.preferences.Clusters[cluster]; found {
preferences = append(preferences, named(cluster, localRSP))
} else {
if localRSP, found := p.preferences.Clusters["*"]; found {
preferences = append(preferences, named(cluster, localRSP))
} else {
plan[cluster] = int64(0)
}
}
}
sort.Sort(byWeight(preferences))
remainingReplicas := replicasToDistribute
// Assign each cluster the minimum number of replicas it requested.
for _, preference := range preferences {
min := minInt64(preference.MinReplicas, remainingReplicas)
if capacity, hasCapacity := estimatedCapacity[preference.clusterName]; hasCapacity {
min = minInt64(min, capacity)
}
remainingReplicas -= min
plan[preference.clusterName] = min
}
// This map contains information how many replicas were assigned to
// the cluster based only on the current replica count and
// rebalance=false preference. It will be later used in remaining replica
// distribution code.
preallocated := make(map[string]int64)
if p.preferences.Rebalance == false {
for _, preference := range preferences {
planned := plan[preference.clusterName]
count, hasSome := currentReplicaCount[preference.clusterName]
if hasSome && count > planned {
target := count
if preference.MaxReplicas != nil {
target = minInt64(*preference.MaxReplicas, target)
}
if capacity, hasCapacity := estimatedCapacity[preference.clusterName]; hasCapacity {
target = minInt64(capacity, target)
}
extra := minInt64(target-planned, remainingReplicas)
if extra < 0 {
extra = 0
}
remainingReplicas -= extra
preallocated[preference.clusterName] = extra
plan[preference.clusterName] = extra + planned
}
}
}
modified := true
// It is possible that a single pass of the loop is not enough to distribute all replicas among clusters due
// to weight, max and rounding corner cases. In such a case we iterate until either
// there are no replicas left, no cluster gets any more replicas, or the number
// of attempts exceeds the available cluster count. If there are no preallocated pods,
// every loop either distributes all remainingReplicas or maxes out at least one cluster.
// If there are preallocated pods then the replica spreading may take longer.
// We reduce the number of pending preallocated replicas by at least half with each iteration so
// we may need log(replicasAtStart) iterations.
// TODO: Prove that clusterCount * log(replicas) iterations solves the problem or adjust the number.
// TODO: This algorithm is O(clusterCount^2 * log(replicas)) which is good for up to 100 clusters.
// Find something faster.
for trial := 0; modified && remainingReplicas > 0; trial++ {
modified = false
weightSum := int64(0)
for _, preference := range preferences {
weightSum += preference.Weight
}
newPreferences := make([]*namedClusterReplicaSetPreferences, 0, len(preferences))
distributeInThisLoop := remainingReplicas
for _, preference := range preferences {
if weightSum > 0 {
start := plan[preference.clusterName]
// Distribute the remaining replicas, rounding fractions always up.
extra := (distributeInThisLoop*preference.Weight + weightSum - 1) / weightSum
extra = minInt64(extra, remainingReplicas)
// Account preallocated.
prealloc := preallocated[preference.clusterName]
usedPrealloc := minInt64(extra, prealloc)
preallocated[preference.clusterName] = prealloc - usedPrealloc
extra = extra - usedPrealloc
if usedPrealloc > 0 {
modified = true
}
// In total there should be the amount that was there at start plus whatever is due
// in this iteration
total := start + extra
// Check whether we overflow the cluster; if so, don't consider this cluster
// in any of the following iterations.
full := false
if preference.MaxReplicas != nil && total > *preference.MaxReplicas {
total = *preference.MaxReplicas
full = true
}
if capacity, hasCapacity := estimatedCapacity[preference.clusterName]; hasCapacity && total > capacity {
overflow[preference.clusterName] = total - capacity
total = capacity
full = true
}
if !full {
newPreferences = append(newPreferences, preference)
}
// Only total-start replicas were actually taken.
remainingReplicas -= (total - start)
plan[preference.clusterName] = total
// Something extra got scheduled on this cluster.
if total > start {
modified = true
}
} else {
break
}
}
preferences = newPreferences
}
if p.preferences.Rebalance {
return plan, overflow
} else {
// If rebalance = false then overflow is trimmed at the level
// of replicas that it failed to place somewhere.
newOverflow := make(map[string]int64)
for key, value := range overflow {
value = minInt64(value, remainingReplicas)
if value > 0 {
newOverflow[key] = value
}
}
return plan, newOverflow
}
}
func minInt64(a int64, b int64) int64 {
if a < b {
return a
}
return b
}
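A small illustrative sketch, not part of this commit, of calling the planner directly; the preference map, cluster names and replica count are made up, and the import block lists the assumed packages. With these inputs the weighted split works out to A:3 and B:6; when weighted shares do not divide evenly, the placement of the remainder is hash dependent, as the tests below note.

import (
    "fmt"

    fedapi "k8s.io/kubernetes/federation/apis/federation"
)

func examplePlan() {
    p := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
        Clusters: map[string]fedapi.ClusterReplicaSetPreferences{
            "A": {Weight: 1},
            "B": {Weight: 2},
        },
    })
    // No current replicas and no capacity estimates; "myrs" seeds the tie-breaking hash.
    plan, overflow := p.Plan(9, []string{"A", "B"}, map[string]int64{}, map[string]int64{}, "myrs")
    fmt.Println(plan)     // plan: A=3, B=6
    fmt.Println(overflow) // empty: no cluster exceeded an estimated capacity
}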

View file

@ -0,0 +1,348 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planner
import (
"testing"
fedapi "k8s.io/kubernetes/federation/apis/federation"
"github.com/stretchr/testify/assert"
)
func doCheck(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) {
planner := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref,
})
plan, overflow := planner.Plan(replicas, clusters, map[string]int64{}, map[string]int64{}, "")
assert.EqualValues(t, expected, plan)
assert.Equal(t, 0, len(overflow))
}
func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64, expected map[string]int64) {
planner := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref,
})
plan, overflow := planner.Plan(replicas, clusters, existing, map[string]int64{}, "")
assert.Equal(t, 0, len(overflow))
assert.EqualValues(t, expected, plan)
}
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64,
capacity map[string]int64,
expected map[string]int64,
expectedOverflow map[string]int64) {
planner := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Rebalance: rebalance,
Clusters: pref,
})
plan, overflow := planner.Plan(replicas, clusters, existing, capacity, "")
assert.EqualValues(t, expected, plan)
assert.Equal(t, expectedOverflow, overflow)
}
func pint(val int64) *int64 {
return &val
}
func TestEqual(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
// hash dependent
map[string]int64{"A": 16, "B": 17, "C": 17})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 25, "B": 25})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
// hash dependent
map[string]int64{"A": 0, "B": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B", "C", "D"},
// hash dependent
map[string]int64{"A": 0, "B": 0, "C": 0, "D": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A"},
map[string]int64{"A": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{},
map[string]int64{})
}
func TestEqualWithExisting(t *testing.T) {
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"C": 30},
map[string]int64{"A": 10, "B": 10, "C": 30})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 30, "B": 20})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 0, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 1, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 4, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 5, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 6, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 7, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
500000, []string{"A", "B"},
map[string]int64{"A": 300000},
map[string]int64{"A": 300000, "B": 200000})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10},
map[string]int64{"A": 25, "B": 25})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 70},
// hash dependent
// TODO: Should be 10:40, update algorithm. Issue: #31816
map[string]int64{"A": 0, "B": 50})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 1, "B": 0})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 20},
map[string]int64{"A": 25, "B": 25})
}
func TestWithExistingAndCapacity(t *testing.T) {
// desired without capacity: map[string]int64{"A": 17, "B": 17, "C": 16})
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{},
map[string]int64{"C": 10},
map[string]int64{"A": 20, "B": 20, "C": 10},
map[string]int64{"C": 7})
// desired B:50 C:0
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
map[string]int64{},
map[string]int64{"B": 10},
map[string]int64{"B": 10, "C": 0},
map[string]int64{"B": 40},
)
// desired A:20 B:40
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
map[string]int64{},
map[string]int64{"B": 10},
map[string]int64{"A": 50, "B": 10, "C": 0},
map[string]int64{"B": 30})
// map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
"D": {Weight: 1, MaxReplicas: pint(10)}},
71, []string{"A", "B", "C", "D"},
map[string]int64{},
map[string]int64{"C": 10},
map[string]int64{"A": 10, "B": 41, "C": 10, "D": 10},
map[string]int64{"C": 11},
)
// desired A:20 B:20
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 1}},
60, []string{"A", "B", "C"},
map[string]int64{},
map[string]int64{"A": 10, "B": 10},
map[string]int64{"A": 10, "B": 10, "C": 0},
map[string]int64{"A": 20, "B": 20})
// desired A:10 B:50 although A:50 B:10 is fully acceptable because rebalance = false
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 5}},
60, []string{"A", "B", "C"},
map[string]int64{},
map[string]int64{"B": 10},
map[string]int64{"A": 50, "B": 10, "C": 0},
map[string]int64{})
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{},
map[string]int64{"B": 10},
map[string]int64{"A": 20, "B": 10, "C": 20},
map[string]int64{})
// Actually we would like to have extra 20 in B but 15 is also good.
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 1}},
60, []string{"A", "B"},
map[string]int64{},
map[string]int64{"B": 10},
map[string]int64{"A": 50, "B": 10},
map[string]int64{"B": 15})
}
func TestMin(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 2, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
// hash dependent.
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0},
"A": {MinReplicas: 100, Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 10, Weight: 1, MaxReplicas: pint(12)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 12, "B": 12, "C": 12})
}
func TestMax(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 0, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 0, "B": 0, "C": 0})
}
func TestWeight(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
map[string]int64{"A": 20, "B": 40, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
map[string]int64{"B": 50, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(10)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 30, "C": 10})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
"D": {Weight: 1, MaxReplicas: pint(10)}},
71, []string{"A", "B", "C", "D"},
map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
"D": {Weight: 1, MaxReplicas: pint(10)},
"E": {Weight: 1}},
91, []string{"A", "B", "C", "D", "E"},
map[string]int64{"A": 10, "B": 25, "C": 21, "D": 10, "E": 25})
}

View file

@ -0,0 +1,37 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["pod_helper.go"],
tags = ["automanaged"],
deps = [
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/labels:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["pod_helper_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
],
)

View file

@ -0,0 +1,82 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podanalyzer
import (
"fmt"
"time"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/labels"
)
type PodAnalysisResult struct {
// Total number of pods created.
Total int
// Number of pods that are running and ready.
RunningAndReady int
// Number of pods that have been in an unschedulable state for at least UnschedulableThreshold.
Unschedulable int
// TODO: Handle other scenarios like pod waiting too long for scheduler etc.
}
const (
// TODO: make it configurable
UnschedulableThreshold = 60 * time.Second
)
// A function that calculates how many pods from the list are in one of
// the meaningful (from the replica set perspective) states. This function is
// a temporary workaround for the current lack of ownerRef in pods.
func AnalysePods(selectorv1 *metav1.LabelSelector, allPods []util.FederatedObject, currentTime time.Time) (map[string]PodAnalysisResult, error) {
selector, err := metav1.LabelSelectorAsSelector(selectorv1)
if err != nil {
return nil, fmt.Errorf("invalid selector: %v", err)
}
result := make(map[string]PodAnalysisResult)
for _, fedObject := range allPods {
pod, isPod := fedObject.Object.(*api_v1.Pod)
if !isPod {
return nil, fmt.Errorf("invalid arg content - not a *pod")
}
if !selector.Empty() && selector.Matches(labels.Set(pod.Labels)) {
status := result[fedObject.ClusterName]
status.Total++
for _, condition := range pod.Status.Conditions {
if pod.Status.Phase == api_v1.PodRunning {
if condition.Type == api_v1.PodReady {
status.RunningAndReady++
}
} else {
if condition.Type == api_v1.PodScheduled &&
condition.Status == api_v1.ConditionFalse &&
condition.Reason == "Unschedulable" &&
condition.LastTransitionTime.Add(UnschedulableThreshold).Before(currentTime) {
status.Unschedulable++
}
}
}
result[fedObject.ClusterName] = status
}
}
return result, nil
}
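One intended consumer of this analysis is the federated replica set controller, which can turn the per-cluster Unschedulable counts into the estimatedCapacity hints accepted by the planner package above. The helper below is a hypothetical illustration of that translation, not part of this commit; current is assumed to hold the number of replicas most recently requested in each cluster.

func estimatedCapacityFrom(current map[string]int64, analysis map[string]PodAnalysisResult) map[string]int64 {
    capacity := make(map[string]int64)
    for cluster, result := range analysis {
        if result.Unschedulable > 0 {
            // Assume the cluster can hold only what is not stuck as unschedulable.
            c := current[cluster] - int64(result.Unschedulable)
            if c < 0 {
                c = 0
            }
            capacity[cluster] = c
        }
    }
    return capacity
}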

View file

@ -0,0 +1,119 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podanalyzer
import (
"testing"
"time"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"github.com/stretchr/testify/assert"
)
func TestAnalyze(t *testing.T) {
now := time.Now()
replicaSet := newReplicaSet(map[string]string{"A": "B"})
replicaSet2 := newReplicaSet(map[string]string{"C": "D"})
podRunning := newPod("p1", replicaSet,
api_v1.PodStatus{
Phase: api_v1.PodRunning,
Conditions: []api_v1.PodCondition{
{
Type: api_v1.PodReady,
Status: api_v1.ConditionTrue,
},
},
})
podUnschedulable := newPod("pU", replicaSet,
api_v1.PodStatus{
Phase: api_v1.PodPending,
Conditions: []api_v1.PodCondition{
{
Type: api_v1.PodScheduled,
Status: api_v1.ConditionFalse,
Reason: "Unschedulable",
LastTransitionTime: metav1.Time{Time: now.Add(-10 * time.Minute)},
},
},
})
podOther := newPod("pO", replicaSet,
api_v1.PodStatus{
Phase: api_v1.PodPending,
Conditions: []api_v1.PodCondition{},
})
podOtherRS := newPod("pO", replicaSet2,
api_v1.PodStatus{
Phase: api_v1.PodPending,
Conditions: []api_v1.PodCondition{},
})
federatedObjects := []util.FederatedObject{
{ClusterName: "c1", Object: podRunning},
{ClusterName: "c1", Object: podRunning},
{ClusterName: "c1", Object: podRunning},
{ClusterName: "c1", Object: podUnschedulable},
{ClusterName: "c1", Object: podUnschedulable},
{ClusterName: "c2", Object: podOther},
{ClusterName: "c2", Object: podOtherRS},
}
report, err := AnalysePods(replicaSet.Spec.Selector, federatedObjects, now)
assert.NoError(t, err)
assert.Equal(t, 2, len(report))
c1Report := report["c1"]
c2Report := report["c2"]
assert.Equal(t, PodAnalysisResult{
Total: 5,
RunningAndReady: 3,
Unschedulable: 2,
}, c1Report)
assert.Equal(t, PodAnalysisResult{
Total: 1,
RunningAndReady: 0,
Unschedulable: 0,
}, c2Report)
}
func newReplicaSet(selectorMap map[string]string) *v1beta1.ReplicaSet {
replicas := int32(3)
rs := &v1beta1.ReplicaSet{
ObjectMeta: api_v1.ObjectMeta{
Name: "foobar",
Namespace: "default",
},
Spec: v1beta1.ReplicaSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
},
}
return rs
}
func newPod(name string, rs *v1beta1.ReplicaSet, status api_v1.PodStatus) *api_v1.Pod {
return &api_v1.Pod{
ObjectMeta: api_v1.ObjectMeta{
Name: name,
Namespace: rs.Namespace,
Labels: rs.Spec.Selector.MatchLabels,
},
Status: status,
}
}

View file

@ -0,0 +1,32 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
)
// Checks if cluster-independent, user provided data in two given Secrets are equal. If in
// the future the Secret structure is expanded then any field that is not populated
// by the api server should be included here.
func SecretEquivalent(s1, s2 api_v1.Secret) bool {
return ObjectMetaEquivalent(s1.ObjectMeta, s2.ObjectMeta) &&
reflect.DeepEqual(s1.Data, s2.Data) &&
reflect.DeepEqual(s1.Type, s2.Type)
}

View file

@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["test_helper.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)

View file

@ -0,0 +1,324 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutil
import (
"fmt"
"os"
"reflect"
"runtime/pprof"
"sync"
"time"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// A structure that distributes events to multiple watchers.
type WatcherDispatcher struct {
sync.Mutex
watchers []*watch.RaceFreeFakeWatcher
eventsSoFar []*watch.Event
orderExecution chan func()
stopChan chan struct{}
}
func (wd *WatcherDispatcher) register(watcher *watch.RaceFreeFakeWatcher) {
wd.Lock()
defer wd.Unlock()
wd.watchers = append(wd.watchers, watcher)
for _, event := range wd.eventsSoFar {
watcher.Action(event.Type, event.Object)
}
}
func (wd *WatcherDispatcher) Stop() {
wd.Lock()
defer wd.Unlock()
close(wd.stopChan)
for _, watcher := range wd.watchers {
watcher.Stop()
}
}
func copy(obj runtime.Object) runtime.Object {
objCopy, err := api.Scheme.DeepCopy(obj)
if err != nil {
panic(err)
}
return objCopy.(runtime.Object)
}
// Add sends an add event.
func (wd *WatcherDispatcher) Add(obj runtime.Object) {
wd.Lock()
defer wd.Unlock()
wd.eventsSoFar = append(wd.eventsSoFar, &watch.Event{Type: watch.Added, Object: copy(obj)})
for _, watcher := range wd.watchers {
if !watcher.IsStopped() {
watcher.Add(copy(obj))
}
}
}
// Modify sends a modify event.
func (wd *WatcherDispatcher) Modify(obj runtime.Object) {
wd.Lock()
defer wd.Unlock()
glog.V(4).Infof("->WatcherDispatcher.Modify(%v)", obj)
wd.eventsSoFar = append(wd.eventsSoFar, &watch.Event{Type: watch.Modified, Object: copy(obj)})
for i, watcher := range wd.watchers {
if !watcher.IsStopped() {
glog.V(4).Infof("->Watcher(%d).Modify(%v)", i, obj)
watcher.Modify(copy(obj))
} else {
glog.V(4).Infof("->Watcher(%d) is stopped. Not calling Modify(%v)", i, obj)
}
}
}
// Delete sends a delete event.
func (wd *WatcherDispatcher) Delete(lastValue runtime.Object) {
wd.Lock()
defer wd.Unlock()
wd.eventsSoFar = append(wd.eventsSoFar, &watch.Event{Type: watch.Deleted, Object: copy(lastValue)})
for _, watcher := range wd.watchers {
if !watcher.IsStopped() {
watcher.Delete(copy(lastValue))
}
}
}
// Error sends an Error event.
func (wd *WatcherDispatcher) Error(errValue runtime.Object) {
wd.Lock()
defer wd.Unlock()
wd.eventsSoFar = append(wd.eventsSoFar, &watch.Event{Type: watch.Error, Object: copy(errValue)})
for _, watcher := range wd.watchers {
if !watcher.IsStopped() {
watcher.Error(copy(errValue))
}
}
}
// Action sends an event of the requested type, for table-based testing.
func (wd *WatcherDispatcher) Action(action watch.EventType, obj runtime.Object) {
wd.Lock()
defer wd.Unlock()
wd.eventsSoFar = append(wd.eventsSoFar, &watch.Event{Type: action, Object: copy(obj)})
for _, watcher := range wd.watchers {
if !watcher.IsStopped() {
watcher.Action(action, copy(obj))
}
}
}
// RegisterFakeWatch adds a watch reactor for the specified resource to the given fake client.
// All subsequent watch requests on the client are served by fake watchers registered with the
// returned dispatcher, which fans events out to all of them.
func RegisterFakeWatch(resource string, client *core.Fake) *WatcherDispatcher {
dispatcher := &WatcherDispatcher{
watchers: make([]*watch.RaceFreeFakeWatcher, 0),
eventsSoFar: make([]*watch.Event, 0),
orderExecution: make(chan func()),
stopChan: make(chan struct{}),
}
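	// Run queued callbacks one at a time so that reactor side effects and
	// the watch events they produce are delivered in a deterministic order.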
go func() {
for {
select {
case fun := <-dispatcher.orderExecution:
fun()
case <-dispatcher.stopChan:
return
}
}
}()
client.AddWatchReactor(resource, func(action core.Action) (bool, watch.Interface, error) {
watcher := watch.NewRaceFreeFake()
dispatcher.register(watcher)
return true, watcher, nil
})
return dispatcher
}
// RegisterFakeList registers a list response for the specified resource inside the given fake client.
// The passed value will be returned with every list call.
func RegisterFakeList(resource string, client *core.Fake, obj runtime.Object) {
client.AddReactor("list", resource, func(action core.Action) (bool, runtime.Object, error) {
return true, obj, nil
})
}
// RegisterFakeCopyOnCreate registers a reactor in the given fake client that passes
// all created objects to the given watcher and also copies them to a channel for
// in-test inspection.
func RegisterFakeCopyOnCreate(resource string, client *core.Fake, watcher *WatcherDispatcher) chan runtime.Object {
objChan := make(chan runtime.Object, 100)
client.AddReactor("create", resource, func(action core.Action) (bool, runtime.Object, error) {
createAction := action.(core.CreateAction)
originalObj := createAction.GetObject()
// Create a copy of the object here to prevent data races while reading the object in the goroutine below.
obj := copy(originalObj)
watcher.orderExecution <- func() {
glog.V(4).Infof("Object created. Writing to channel: %v", obj)
watcher.Add(obj)
objChan <- obj
}
return true, originalObj, nil
})
return objChan
}
// RegisterFakeCopyOnUpdate registers a reactor in the given fake client that passes
// all updated objects to the given watcher and also copies them to a channel for
// in-test inspection.
func RegisterFakeCopyOnUpdate(resource string, client *core.Fake, watcher *WatcherDispatcher) chan runtime.Object {
objChan := make(chan runtime.Object, 100)
client.AddReactor("update", resource, func(action core.Action) (bool, runtime.Object, error) {
updateAction := action.(core.UpdateAction)
originalObj := updateAction.GetObject()
// Create a copy of the object here to prevent data races while reading the object in the goroutine below.
obj := copy(originalObj)
watcher.orderExecution <- func() {
glog.V(4).Infof("Object updated. Writing to channel: %v", obj)
watcher.Modify(obj)
objChan <- obj
}
return true, originalObj, nil
})
return objChan
}
// GetObjectFromChan tries to get an api object from the given channel
// within a reasonable time.
func GetObjectFromChan(c chan runtime.Object) runtime.Object {
select {
case obj := <-c:
return obj
case <-time.After(wait.ForeverTestTimeout):
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
return nil
}
}
type CheckingFunction func(runtime.Object) error
// CheckObjectFromChan tries to get an object matching the given check function
// within a reasonable time.
func CheckObjectFromChan(c chan runtime.Object, checkFunction CheckingFunction) error {
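	// Wait longer for the first object; once something has arrived, retry
	// failed checks with a shorter timeout before giving up.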
delay := 20 * time.Second
var lastError error
for {
select {
case obj := <-c:
if lastError = checkFunction(obj); lastError == nil {
return nil
}
glog.Infof("Check function failed with %v", lastError)
delay = 5 * time.Second
case <-time.After(delay):
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
if lastError == nil {
return fmt.Errorf("Failed to get an object from channel")
} else {
return lastError
}
}
}
}
// CompareObjectMeta returns an error when the given objects are not equivalent.
func CompareObjectMeta(a, b apiv1.ObjectMeta) error {
if a.Namespace != b.Namespace {
return fmt.Errorf("Different namespace expected:%s observed:%s", a.Namespace, b.Namespace)
}
if a.Name != b.Name {
return fmt.Errorf("Different name expected:%s observed:%s", a.Namespace, b.Namespace)
}
if !reflect.DeepEqual(a.Labels, b.Labels) && (len(a.Labels) != 0 || len(b.Labels) != 0) {
return fmt.Errorf("Labels are different expected:%v observerd:%v", a.Labels, b.Labels)
}
if !reflect.DeepEqual(a.Annotations, b.Annotations) && (len(a.Annotations) != 0 || len(b.Annotations) != 0) {
return fmt.Errorf("Annotations are different expected:%v observerd:%v", a.Annotations, b.Annotations)
}
return nil
}
func ToFederatedInformerForTestOnly(informer util.FederatedInformer) util.FederatedInformerForTestOnly {
inter := informer.(interface{})
return inter.(util.FederatedInformerForTestOnly)
}
// NewCluster builds a new cluster object.
func NewCluster(name string, readyStatus apiv1.ConditionStatus) *federationapi.Cluster {
return &federationapi.Cluster{
ObjectMeta: apiv1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Status: federationapi.ClusterStatus{
Conditions: []federationapi.ClusterCondition{
{Type: federationapi.ClusterReady, Status: readyStatus},
},
},
}
}
// Ensure a key is in the store before returning (or timeout w/ error)
func WaitForStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
_, found, err := store.GetByKey(clusterName, key)
return found, err
})
return err
}
// Ensure a key is in the store and passes the given check function before returning (or timeout w/ error)
func WaitForStoreUpdateChecking(store util.FederatedReadOnlyStore, clusterName, key string, timeout time.Duration,
checkFunction CheckingFunction) error {
retryInterval := 500 * time.Millisecond
var lastError error
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
item, found, err := store.GetByKey(clusterName, key)
if err != nil || !found {
return found, err
}
runtimeObj := item.(runtime.Object)
		lastError = checkFunction(runtimeObj)
		if lastError != nil {
			glog.V(2).Infof("Check function failed for %s %v %v", key, runtimeObj, lastError)
		}
		return lastError == nil, nil
})
return err
}
func MetaAndSpecCheckingFunction(expected runtime.Object) CheckingFunction {
return func(obj runtime.Object) error {
if util.ObjectMetaAndSpecEquivalent(obj, expected) {
return nil
}
return fmt.Errorf("Object different expected=%#v received=%#v", expected, obj)
}
}
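
A minimal sketch of how these helpers are typically wired together in a federation controller test; the fake client, the "secrets" resource name, and the controller being exercised are assumptions for illustration and are not part of this commit.

package testutil

import (
	"testing"

	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

// exampleControllerTest is illustrative only. It assumes some controller
// under test creates objects through the supplied fake client.
func exampleControllerTest(t *testing.T, fake *core.Fake, existing runtime.Object) {
	// Every list of "secrets" returns the canned object.
	RegisterFakeList("secrets", fake, existing)

	// All watches on "secrets" are fed by one dispatcher, which replays
	// earlier events to watchers that register late.
	watcher := RegisterFakeWatch("secrets", fake)

	// Objects created through the fake client are echoed to the watcher
	// and to a channel the test can inspect.
	created := RegisterFakeCopyOnCreate("secrets", fake, watcher)

	// ... start the controller under test here ...

	if obj := GetObjectFromChan(created); obj == nil {
		t.Fatal("expected the controller to create an object")
	}
}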

View file

@ -0,0 +1,40 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
// TODO: remove this when Reflector takes an interface rather than a particular ListOptions as input parameter.
func VersionizeV1ListOptions(in api.ListOptions) (out v1.ListOptions) {
if in.LabelSelector != nil {
out.LabelSelector = in.LabelSelector.String()
} else {
out.LabelSelector = ""
}
if in.FieldSelector != nil {
out.FieldSelector = in.FieldSelector.String()
} else {
out.FieldSelector = ""
}
out.Watch = in.Watch
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = in.TimeoutSeconds
return out
}
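
A short illustration of converting internal ListOptions that carry selector objects into their versioned, string-based form before handing them to a versioned client or Reflector; the function and selector values below are assumed for the example and are not part of this commit.

package util

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
)

// exampleVersionize is illustrative only.
func exampleVersionize() v1.ListOptions {
	internal := api.ListOptions{
		LabelSelector: labels.Set{"app": "frontend"}.AsSelector(),
		Watch:         true,
	}
	// The versioned options carry the selector in its string form,
	// e.g. "app=frontend".
	return VersionizeV1ListOptions(internal)
}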