
Add glide.yaml and vendor deps

This commit is contained in:
Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"cluster_helper.go",
"dns.go",
"doc.go",
"endpoint_helper.go",
"service_helper.go",
"servicecontroller.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/cache:go_default_library",
"//federation/client/clientset_generated/federation_release_1_5:go_default_library",
"//federation/pkg/dnsprovider:go_default_library",
"//federation/pkg/dnsprovider/rrstype:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/conversion:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/runtime:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"dns_test.go",
"endpoint_helper_test.go",
"service_helper_test.go",
"servicecontroller_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/pkg/dnsprovider/providers/google/clouddns:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/util/sets:go_default_library",
],
)

@ -0,0 +1,205 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"reflect"
"github.com/golang/glog"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
)
type clusterCache struct {
clientset *kubeclientset.Clientset
cluster *v1beta1.Cluster
// A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister
// Watches changes to all services
serviceController *cache.Controller
// A store of endpoints, populated by the endpointController
endpointStore cache.StoreToEndpointsLister
// Watches changes to all endpoints
endpointController *cache.Controller
// services that need to be synced
serviceQueue *workqueue.Type
// endpoints that need to be synced
endpointQueue *workqueue.Type
}
type clusterClientCache struct {
rwlock sync.Mutex // protects clientMap
clientMap map[string]*clusterCache
}
func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterName string) {
cachedClusterClient, ok := cc.clientMap[clusterName]
// only create when no existing cachedClusterClient
if ok {
if !reflect.DeepEqual(cachedClusterClient.cluster.Spec, cluster.Spec) {
//rebuild clientset when cluster spec is changed
clientset, err := newClusterClientset(cluster)
if err != nil || clientset == nil {
glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
}
glog.V(4).Infof("Cluster spec changed, rebuild clientset for cluster %s", clusterName)
cachedClusterClient.clientset = clientset
go cachedClusterClient.serviceController.Run(wait.NeverStop)
go cachedClusterClient.endpointController.Run(wait.NeverStop)
glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
} else {
// do nothing when there is no spec change
glog.V(4).Infof("Keep clientset for cluster %s", clusterName)
return
}
} else {
glog.V(4).Infof("No client cache for cluster %s, building new", clusterName)
clientset, err := newClusterClientset(cluster)
if err != nil || clientset == nil {
glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
}
cachedClusterClient = &clusterCache{
cluster: cluster,
clientset: clientset,
serviceQueue: workqueue.New(),
endpointQueue: workqueue.New(),
}
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).Watch(options)
},
},
&v1.Endpoints{},
serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueEndpoint(obj, clusterName)
},
UpdateFunc: func(old, cur interface{}) {
cc.enqueueEndpoint(cur, clusterName)
},
DeleteFunc: func(obj interface{}) {
cc.enqueueEndpoint(obj, clusterName)
},
},
)
cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&v1.Service{},
serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueService(obj, clusterName)
},
UpdateFunc: func(old, cur interface{}) {
oldService, ok := old.(*v1.Service)
if !ok {
return
}
curService, ok := cur.(*v1.Service)
if !ok {
return
}
if !reflect.DeepEqual(oldService.Status.LoadBalancer, curService.Status.LoadBalancer) {
cc.enqueueService(cur, clusterName)
}
},
DeleteFunc: func(obj interface{}) {
service, _ := obj.(*v1.Service)
cc.enqueueService(obj, clusterName)
glog.V(2).Infof("Service %s/%s deletion found and enque to service store %s", service.Namespace, service.Name, clusterName)
},
},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
cc.clientMap[clusterName] = cachedClusterClient
go cachedClusterClient.serviceController.Run(wait.NeverStop)
go cachedClusterClient.endpointController.Run(wait.NeverStop)
glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
}
}
//TODO: copied from cluster controller, to make this a common function in pass 2
// delFromClusterSet deletes a cluster from the clusterSet and
// deletes the corresponding restclient from the map clusterKubeClientMap
func (cc *clusterClientCache) delFromClusterSet(obj interface{}) {
cluster, ok := obj.(*v1beta1.Cluster)
cc.rwlock.Lock()
defer cc.rwlock.Unlock()
if ok {
delete(cc.clientMap, cluster.Name)
} else {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Infof("Object contained wasn't a cluster or a deleted key: %+v", obj)
return
}
glog.Infof("Found tombstone for %v", obj)
delete(cc.clientMap, tombstone.Key)
}
}
// addToClientMap inserts the new cluster into the clientMap and creates a corresponding
// restclient in the map clusterKubeClientMap
func (cc *clusterClientCache) addToClientMap(obj interface{}) {
cc.rwlock.Lock()
defer cc.rwlock.Unlock()
cluster, ok := obj.(*v1beta1.Cluster)
if !ok {
return
}
pred := getClusterConditionPredicate()
// check status
// skip if not ready
if pred(*cluster) {
cc.startClusterLW(cluster, cluster.Name)
}
}
func newClusterClientset(c *v1beta1.Cluster) (*kubeclientset.Clientset, error) {
clusterConfig, err := util.BuildClusterConfig(c)
if clusterConfig != nil {
clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
return clientset, nil
}
return nil, err
}
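startClusterLW above caches one client per cluster and rebuilds it only when the cluster spec actually changes (the reflect.DeepEqual check). Below is a minimal, dependency-free sketch of that caching pattern; the type and field names are hypothetical stand-ins for v1beta1.ClusterSpec and the generated clientset, not part of the vendored file.

package main

import (
	"fmt"
	"reflect"
)

// Illustrative sketch only: clusterSpec and clusterClient are hypothetical
// stand-ins for v1beta1.ClusterSpec and *kubeclientset.Clientset.
type clusterSpec struct{ ServerAddress string }
type clusterClient struct{ target string }

type clusterEntry struct {
	spec   clusterSpec
	client *clusterClient
}

// clientCache maps cluster name -> cached client plus the spec it was built from.
type clientCache map[string]*clusterEntry

// ensureClient rebuilds the client only when the spec changed, mirroring the
// reflect.DeepEqual comparison in startClusterLW.
func (c clientCache) ensureClient(name string, spec clusterSpec) *clusterClient {
	if entry, ok := c[name]; ok && reflect.DeepEqual(entry.spec, spec) {
		return entry.client // spec unchanged: keep the existing client
	}
	client := &clusterClient{target: spec.ServerAddress} // stand-in for newClusterClientset
	c[name] = &clusterEntry{spec: spec, client: client}
	return client
}

func main() {
	cache := clientCache{}
	a := cache.ensureClient("cluster-a", clusterSpec{ServerAddress: "https://10.0.0.1"})
	b := cache.ensureClient("cluster-a", clusterSpec{ServerAddress: "https://10.0.0.1"})
	c := cache.ensureClient("cluster-a", clusterSpec{ServerAddress: "https://10.0.0.2"})
	fmt.Println(a == b, a == c) // true false: reused for an equal spec, rebuilt on change
}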

@ -0,0 +1,366 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"net"
"github.com/golang/glog"
"strings"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
"k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
)
const (
// minDnsTtl is the minimum safe DNS TTL value to use (in seconds). We use this as the TTL for all DNS records.
minDnsTtl = 180
)
// getHealthyEndpoints returns the hostnames and/or IP addresses of healthy endpoints for the service, at a zone, region and global level (or an error)
func (s *ServiceController) getHealthyEndpoints(clusterName string, cachedService *cachedService) (zoneEndpoints, regionEndpoints, globalEndpoints []string, err error) {
var (
zoneNames []string
regionName string
)
if zoneNames, regionName, err = s.getClusterZoneNames(clusterName); err != nil {
return nil, nil, nil, err
}
for lbClusterName, lbStatus := range cachedService.serviceStatusMap {
lbZoneNames, lbRegionName, err := s.getClusterZoneNames(lbClusterName)
if err != nil {
return nil, nil, nil, err
}
for _, ingress := range lbStatus.Ingress {
readyEndpoints, ok := cachedService.endpointMap[lbClusterName]
if !ok || readyEndpoints == 0 {
continue
}
var address string
// We should get either an IP address or a hostname - use whichever one we get
if ingress.IP != "" {
address = ingress.IP
} else if ingress.Hostname != "" {
address = ingress.Hostname
}
if len(address) <= 0 {
return nil, nil, nil, fmt.Errorf("Service %s/%s in cluster %s has neither LoadBalancerStatus.ingress.ip nor LoadBalancerStatus.ingress.hostname. Cannot use it as endpoint for federated service.",
cachedService.lastState.Name, cachedService.lastState.Namespace, clusterName)
}
for _, lbZoneName := range lbZoneNames {
for _, zoneName := range zoneNames {
if lbZoneName == zoneName {
zoneEndpoints = append(zoneEndpoints, address)
}
}
}
if lbRegionName == regionName {
regionEndpoints = append(regionEndpoints, address)
}
globalEndpoints = append(globalEndpoints, address)
}
}
return zoneEndpoints, regionEndpoints, globalEndpoints, nil
}
// getClusterZoneNames returns the name of the zones (and the region) where the specified cluster exists (e.g. zones "us-east1-c" on GCE, or "us-east-1b" on AWS)
func (s *ServiceController) getClusterZoneNames(clusterName string) (zones []string, region string, err error) {
client, ok := s.clusterCache.clientMap[clusterName]
if !ok {
return nil, "", fmt.Errorf("Cluster cache does not contain entry for cluster %s", clusterName)
}
if client.cluster == nil {
return nil, "", fmt.Errorf("Cluster cache entry for cluster %s is nil", clusterName)
}
return client.cluster.Status.Zones, client.cluster.Status.Region, nil
}
// getServiceDnsSuffix returns the DNS suffix to use when creating federated-service DNS records
func (s *ServiceController) getServiceDnsSuffix() (string, error) {
return s.serviceDnsSuffix, nil
}
// getDnsZones returns the DNS zones matching dnsZoneName and dnsZoneID (if specified)
func getDnsZones(dnsZoneName string, dnsZoneID string, dnsZonesInterface dnsprovider.Zones) ([]dnsprovider.Zone, error) {
// TODO: We need query-by-name and query-by-id functions
dnsZones, err := dnsZonesInterface.List()
if err != nil {
return nil, err
}
var matches []dnsprovider.Zone
findName := strings.TrimSuffix(dnsZoneName, ".")
for _, dnsZone := range dnsZones {
if dnsZoneID != "" {
if dnsZoneID != dnsZone.ID() {
continue
}
}
if findName != "" {
if strings.TrimSuffix(dnsZone.Name(), ".") != findName {
continue
}
}
matches = append(matches, dnsZone)
}
return matches, nil
}
// getDnsZone returns the DNS zone, as identified by dnsZoneName and dnsZoneID
// This is similar to getDnsZones, but returns an error if there are zero or multiple matching zones.
func getDnsZone(dnsZoneName string, dnsZoneID string, dnsZonesInterface dnsprovider.Zones) (dnsprovider.Zone, error) {
dnsZones, err := getDnsZones(dnsZoneName, dnsZoneID, dnsZonesInterface)
if err != nil {
return nil, err
}
if len(dnsZones) == 1 {
return dnsZones[0], nil
}
name := dnsZoneName
if dnsZoneID != "" {
name += "/" + dnsZoneID
}
if len(dnsZones) == 0 {
return nil, fmt.Errorf("DNS zone %s not found.", name)
} else {
return nil, fmt.Errorf("DNS zone %s is ambiguous (please specify zoneID).", name)
}
}
/* getRrset is a hack around the fact that dnsprovider.ResourceRecordSets interface does not yet include a Get() method, only a List() method. TODO: Fix that.
Note that if the named resource record set does not exist, but no error occurred, the returned set, and error, are both nil
*/
func getRrset(dnsName string, rrsetsInterface dnsprovider.ResourceRecordSets) (dnsprovider.ResourceRecordSet, error) {
var returnVal dnsprovider.ResourceRecordSet
rrsets, err := rrsetsInterface.List()
if err != nil {
return nil, err
}
for _, rrset := range rrsets {
if rrset.Name() == dnsName {
returnVal = rrset
break
}
}
return returnVal, nil
}
/* getResolvedEndpoints performs DNS resolution on the provided slice of endpoints (which might be DNS names or IPv4 addresses)
and returns a list of IPv4 addresses. If any of the endpoints are neither valid IPv4 addresses nor resolvable DNS names,
a non-nil error is also returned (possibly along with a partially complete list of resolved endpoints).
*/
func getResolvedEndpoints(endpoints []string) ([]string, error) {
resolvedEndpoints := make([]string, 0, len(endpoints))
for _, endpoint := range endpoints {
if net.ParseIP(endpoint) == nil {
// It's not a valid IP address, so assume it's a DNS name, and try to resolve it,
// replacing its DNS name with its IP addresses in resolvedEndpoints
ipAddrs, err := net.LookupHost(endpoint)
if err != nil {
return resolvedEndpoints, err
}
resolvedEndpoints = append(resolvedEndpoints, ipAddrs...)
} else {
resolvedEndpoints = append(resolvedEndpoints, endpoint)
}
}
return resolvedEndpoints, nil
}
/* ensureDnsRrsets ensures (idempotently, and with minimum mutations) that all of the DNS resource record sets for dnsName are consistent with endpoints.
if endpoints is nil or empty, a CNAME record to uplevelCname is ensured.
*/
func (s *ServiceController) ensureDnsRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {
rrsets, supported := dnsZone.ResourceRecordSets()
if !supported {
return fmt.Errorf("Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface.", dnsName)
}
rrset, err := getRrset(dnsName, rrsets) // TODO: rrsets.Get(dnsName)
if err != nil {
return err
}
if rrset == nil {
glog.V(4).Infof("No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q", dnsName, uplevelCname)
if len(endpoints) < 1 {
glog.V(4).Infof("There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided", dnsName, uplevelCname)
if uplevelCname != "" {
glog.V(4).Infof("Creating CNAME to %q for %q", uplevelCname, dnsName)
newRrset := rrsets.New(dnsName, []string{uplevelCname}, minDnsTtl, rrstype.CNAME)
glog.V(4).Infof("Adding recordset %v", newRrset)
err = rrsets.StartChangeset().Add(newRrset).Apply()
if err != nil {
return err
}
glog.V(4).Infof("Successfully created CNAME to %q for %q", uplevelCname, dnsName)
} else {
glog.V(4).Infof("We want no record for %q, and we have no record, so we're all good.", dnsName)
}
} else {
// We have valid endpoint addresses, so just add them as A records.
// But first resolve DNS names, as some cloud providers (like AWS) expose
// load balancers behind DNS names, not IP addresses.
glog.V(4).Infof("We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names", endpoints, dnsName)
resolvedEndpoints, err := getResolvedEndpoints(endpoints)
if err != nil {
return err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.
}
newRrset := rrsets.New(dnsName, resolvedEndpoints, minDnsTtl, rrstype.A)
glog.V(4).Infof("Adding recordset %v", newRrset)
err = rrsets.StartChangeset().Add(newRrset).Apply()
if err != nil {
return err
}
glog.V(4).Infof("Successfully added recordset %v", newRrset)
}
} else {
// the rrset already exists, so make it right.
glog.V(4).Infof("Recordset %v already exists. Ensuring that it is correct.", rrset)
if len(endpoints) < 1 {
// Need an appropriate CNAME record. Check that we have it.
newRrset := rrsets.New(dnsName, []string{uplevelCname}, minDnsTtl, rrstype.CNAME)
glog.V(4).Infof("No healthy endpoints for %s. Have recordset %v. Need recordset %v", dnsName, rrset, newRrset)
if dnsprovider.ResourceRecordSetsEquivalent(rrset, newRrset) {
// The existing rrset is equivalent to the required one - our work is done here
glog.V(4).Infof("Existing recordset %v is equivalent to needed recordset %v, our work is done here.", rrset, newRrset)
return nil
} else {
// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).
glog.V(4).Infof("Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.", rrset, newRrset)
changeSet := rrsets.StartChangeset()
changeSet.Remove(rrset)
if uplevelCname != "" {
changeSet.Add(newRrset)
if err := changeSet.Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully replaced needed recordset %v -> %v", rrset, newRrset)
} else {
if err := changeSet.Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully removed existing recordset %v", rrset)
glog.V(4).Infof("Uplevel CNAME is empty string. Not adding recordset %v", newRrset)
}
}
} else {
// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.
// And we have healthy endpoints. Just replace what's there with the healthy endpoints, if it's not already correct.
glog.V(4).Infof("%s: Healthy endpoints %v exist. Recordset %v exists. Reconciling.", dnsName, endpoints, rrset)
resolvedEndpoints, err := getResolvedEndpoints(endpoints)
if err != nil { // Some invalid addresses or otherwise unresolvable DNS names.
return err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.
}
newRrset := rrsets.New(dnsName, resolvedEndpoints, minDnsTtl, rrstype.A)
glog.V(4).Infof("Have recordset %v. Need recordset %v", rrset, newRrset)
if dnsprovider.ResourceRecordSetsEquivalent(rrset, newRrset) {
glog.V(4).Infof("Existing recordset %v is equivalent to needed recordset %v, our work is done here.", rrset, newRrset)
// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the
// worst case we'll just replace what's there with an equivalent, if not exactly identical record set.
return nil
} else {
// Need to replace the existing one with a better one
glog.V(4).Infof("Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.", rrset, newRrset)
if err = rrsets.StartChangeset().Remove(rrset).Add(newRrset).Apply(); err != nil {
return err
}
glog.V(4).Infof("Successfully replaced recordset %v -> %v", rrset, newRrset)
}
}
}
return nil
}
/* ensureDnsRecords ensures (idempotently, and with minimum mutations) that all of the DNS records for a service in a given cluster are correct,
given the current state of that service in that cluster. This should be called every time the state of a service might have changed
(either w.r.t. its loadbalancer address, or if the number of healthy backend endpoints for that service transitioned from zero to non-zero,
or vice versa). Only shards of the service which have both a loadbalancer ingress IP address or hostname AND at least one healthy backend endpoint
are included in DNS records for that service (at all of zone, region and global levels). All other addresses are removed. Also, if no shards exist
in the zone or region of the cluster, a CNAME reference to the next higher level is ensured to exist. */
func (s *ServiceController) ensureDnsRecords(clusterName string, cachedService *cachedService) error {
// Quinton: Pseudocode....
// See https://github.com/kubernetes/kubernetes/pull/25107#issuecomment-218026648
// For each service we need the following DNS names:
// mysvc.myns.myfed.svc.z1.r1.mydomain.com (for zone z1 in region r1)
// - an A record to IP address of specific shard in that zone (if that shard exists and has healthy endpoints)
// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.r1.mydomain.com (if a healthy shard does not exist in zone z1)
// mysvc.myns.myfed.svc.r1.mydomain.com (for region r1)
// - a set of A records to IP addresses of all healthy shards in region r1, if one or more of these exist
// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.mydomain.com (if no healthy shards exist in region r1)
// mysvc.myns.myfed.svc.mydomain.com (global)
// - a set of A records to IP addresses of all healthy shards in all regions, if one or more of these exist.
// - no record (NXDOMAIN response) if no healthy shards exist in any region
//
// For each cached service, cachedService.lastState tracks the current known state of the service, while cachedService.appliedState contains
// the state of the service when we last successfully sync'd its DNS records.
// So this time around we only need to patch that (add new records, remove deleted records, and update changed records).
//
if s == nil {
return fmt.Errorf("nil ServiceController passed to ServiceController.ensureDnsRecords(clusterName: %s, cachedService: %v)", clusterName, cachedService)
}
if s.dns == nil {
return nil
}
if cachedService == nil {
return fmt.Errorf("nil cachedService passed to ServiceController.ensureDnsRecords(clusterName: %s, cachedService: %v)", clusterName, cachedService)
}
serviceName := cachedService.lastState.Name
namespaceName := cachedService.lastState.Namespace
zoneNames, regionName, err := s.getClusterZoneNames(clusterName)
if err != nil {
return err
}
if zoneNames == nil {
return fmt.Errorf("failed to get cluster zone names")
}
serviceDnsSuffix, err := s.getServiceDnsSuffix()
if err != nil {
return err
}
zoneEndpoints, regionEndpoints, globalEndpoints, err := s.getHealthyEndpoints(clusterName, cachedService)
if err != nil {
return err
}
commonPrefix := serviceName + "." + namespaceName + "." + s.federationName + ".svc"
// dnsNames is the path up the DNS search tree, starting at the leaf
dnsNames := []string{
commonPrefix + "." + zoneNames[0] + "." + regionName + "." + serviceDnsSuffix, // zone level - TODO might need other zone names for multi-zone clusters
commonPrefix + "." + regionName + "." + serviceDnsSuffix, // region level, one up from zone level
commonPrefix + "." + serviceDnsSuffix, // global level, one up from region level
"", // nowhere to go up from global level
}
endpoints := [][]string{zoneEndpoints, regionEndpoints, globalEndpoints}
dnsZone, err := getDnsZone(s.zoneName, s.zoneID, s.dnsZones)
if err != nil {
return err
}
for i, endpoint := range endpoints {
if err = s.ensureDnsRrsets(dnsZone, dnsNames[i], endpoint, dnsNames[i+1]); err != nil {
return err
}
}
return nil
}
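The dnsNames block in ensureDnsRecords walks up the DNS search tree from the zone level to the global level. The sketch below assembles those names the same way the commonPrefix/dnsNames code above does; the service, federation, zone, region and suffix values are hypothetical examples, not taken from the vendored file.

package main

import "fmt"

func main() {
	// Hypothetical inputs mirroring the fields used by ensureDnsRecords.
	serviceName, namespaceName := "mysvc", "myns"
	federationName := "myfed"
	serviceDnsSuffix := "federation.example.com"
	zoneName, regionName := "us-central1-a", "us-central1"

	commonPrefix := serviceName + "." + namespaceName + "." + federationName + ".svc"
	// Path up the DNS search tree, starting at the leaf; each level falls back
	// (via CNAME) to the next one when it has no healthy endpoints.
	dnsNames := []string{
		commonPrefix + "." + zoneName + "." + regionName + "." + serviceDnsSuffix, // zone level
		commonPrefix + "." + regionName + "." + serviceDnsSuffix,                  // region level
		commonPrefix + "." + serviceDnsSuffix,                                     // global level
	}
	for _, n := range dnsNames {
		fmt.Println(n)
	}
	// Output:
	// mysvc.myns.myfed.svc.us-central1-a.us-central1.federation.example.com
	// mysvc.myns.myfed.svc.us-central1.federation.example.com
	// mysvc.myns.myfed.svc.federation.example.com
}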

@ -0,0 +1,171 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
"testing"
"fmt"
"reflect"
"sort"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestServiceController_ensureDnsRecords(t *testing.T) {
tests := []struct {
name string
service v1.Service
expected []string
serviceStatus v1.LoadBalancerStatus
}{
{
name: "withip",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
serviceStatus: buildServiceStatus([][]string{{"198.51.100.1", ""}}),
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:A:180:[198.51.100.1]",
},
},
/*
TODO: getResolvedEndpoints performs DNS lookup.
Mock and maybe look at error handling when some endpoints resolve, but also caching?
{
name: "withname",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
serviceStatus: buildServiceStatus([][]string{{"", "randomstring.amazonelb.example.com"}}),
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:A:180:[198.51.100.1]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:A:180:[198.51.100.1]",
},
},
*/
{
name: "noendpoints",
service: v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "servicename",
Namespace: "servicenamespace",
},
},
expected: []string{
"example.com:servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com:CNAME:180:[servicename.servicenamespace.myfederation.svc.federation.example.com]",
"example.com:servicename.servicenamespace.myfederation.svc.foozone.fooregion.federation.example.com:CNAME:180:[servicename.servicenamespace.myfederation.svc.fooregion.federation.example.com]",
},
},
}
for _, test := range tests {
fakedns, _ := clouddns.NewFakeInterface()
fakednsZones, ok := fakedns.Zones()
if !ok {
t.Error("Unable to fetch zones")
}
serviceController := ServiceController{
dns: fakedns,
dnsZones: fakednsZones,
serviceDnsSuffix: "federation.example.com",
zoneName: "example.com",
federationName: "myfederation",
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
rwlock: sync.Mutex{},
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
clusterName := "testcluster"
serviceController.clusterCache.clientMap[clusterName] = &clusterCache{
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
}
cachedService := &cachedService{
lastState: &test.service,
endpointMap: make(map[string]int),
serviceStatusMap: make(map[string]v1.LoadBalancerStatus),
}
cachedService.endpointMap[clusterName] = 1
if !reflect.DeepEqual(&test.serviceStatus, &v1.LoadBalancerStatus{}) {
cachedService.serviceStatusMap[clusterName] = test.serviceStatus
}
err := serviceController.ensureDnsRecords(clusterName, cachedService)
if err != nil {
t.Errorf("Test failed for %s, unexpected error %v", test.name, err)
}
zones, err := fakednsZones.List()
if err != nil {
t.Errorf("error querying zones: %v", err)
}
// Dump every record to a testable-by-string-comparison form
var records []string
for _, z := range zones {
zoneName := z.Name()
rrs, ok := z.ResourceRecordSets()
if !ok {
t.Errorf("cannot get rrs for zone %q", zoneName)
}
rrList, err := rrs.List()
if err != nil {
t.Errorf("error querying rr for zone %q: %v", zoneName, err)
}
for _, rr := range rrList {
rrdatas := rr.Rrdatas()
// Put in consistent (testable-by-string-comparison) order
sort.Strings(rrdatas)
records = append(records, fmt.Sprintf("%s:%s:%s:%d:%s", zoneName, rr.Name(), rr.Type(), rr.Ttl(), rrdatas))
}
}
// Ignore order of records
sort.Strings(records)
sort.Strings(test.expected)
if !reflect.DeepEqual(records, test.expected) {
t.Errorf("Test %q failed. Actual=%v, Expected=%v", test.name, records, test.expected)
}
}
}

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package service contains code for syncing Kubernetes services
// and cloud DNS servers with the federated service registry.
package service // import "k8s.io/kubernetes/federation/pkg/federation-controller/service"

@ -0,0 +1,206 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"time"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
)
// clusterEndpointWorker runs a worker thread for each cluster that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (sc *ServiceController) clusterEndpointWorker() {
// process all pending events in endpointWorkerDoneChan
ForLoop:
for {
select {
case clusterName := <-sc.endpointWorkerDoneChan:
sc.endpointWorkerMap[clusterName] = false
default:
// non-blocking, comes here if all existing events are processed
break ForLoop
}
}
for clusterName, cache := range sc.clusterCache.clientMap {
workerExist, found := sc.endpointWorkerMap[clusterName]
if found && workerExist {
continue
}
// create a worker only if the previous worker has finished and gone out of scope
go func(cache *clusterCache, clusterName string) {
fedClient := sc.federationClient
for {
func() {
key, quit := cache.endpointQueue.Get()
// update endpoint cache
if quit {
// send signal that current worker has finished tasks and is going out of scope
sc.endpointWorkerDoneChan <- clusterName
return
}
defer cache.endpointQueue.Done(key)
err := sc.clusterCache.syncEndpoint(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
if err != nil {
glog.V(2).Infof("Failed to sync endpoint: %+v", err)
}
}()
}
}(cache, clusterName)
sc.endpointWorkerMap[clusterName] = true
}
}
// Whenever there is a change on an endpoint, the federation service should be updated
// key is the namespaced name of the endpoint
func (cc *clusterClientCache) syncEndpoint(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient fedclientset.Interface, serviceController *ServiceController) error {
cachedService, ok := serviceCache.get(key)
if !ok {
// here we filter out all non-federation services
return nil
}
endpointInterface, exists, err := clusterCache.endpointStore.GetByKey(key)
if err != nil {
glog.Errorf("Did not successfully get %v from store: %v, will retry later", key, err)
clusterCache.endpointQueue.Add(key)
return err
}
if exists {
endpoint, ok := endpointInterface.(*v1.Endpoints)
if ok {
glog.V(4).Infof("Found endpoint for federation service %s/%s from cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
err = cc.processEndpointUpdate(cachedService, endpoint, clusterName, serviceController)
} else {
_, ok := endpointInterface.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", endpointInterface)
}
glog.Infof("Found tombstone for %v", key)
err = cc.processEndpointDeletion(cachedService, clusterName, serviceController)
}
} else {
// endpoint absence in store means the watcher caught the deletion, ensure DNS records are cleaned
glog.Infof("Cannot get endpoint %v for cluster %s from endpointStore", key, clusterName)
err = cc.processEndpointDeletion(cachedService, clusterName, serviceController)
}
if err != nil {
glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
clusterCache.endpointQueue.Add(key)
}
cachedService.resetDNSUpdateDelay()
return nil
}
func (cc *clusterClientCache) processEndpointDeletion(cachedService *cachedService, clusterName string, serviceController *ServiceController) error {
glog.V(4).Infof("Processing endpoint deletion for %s/%s, cluster %s", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
var err error
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
_, ok := cachedService.endpointMap[clusterName]
// TODO remove ok checking? if the service controller is restarted, then the endpointMap for the cluster does not exist
// need to query dns info from the dnsprovider and determine whether deletion is needed
if ok {
// endpoints lost, clean dns record
glog.V(4).Infof("Cached endpoint was found for %s/%s, cluster %s, removing", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
delete(cachedService.endpointMap, clusterName)
for i := 0; i < clientRetryCount; i++ {
err := serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
}
return err
}
// Update dns info when an endpoint update event is received
// We do not care about the endpoint details; what we need to check here is that len(endpoints.Subsets) > 0
func (cc *clusterClientCache) processEndpointUpdate(cachedService *cachedService, endpoint *v1.Endpoints, clusterName string, serviceController *ServiceController) error {
glog.V(4).Infof("Processing endpoint update for %s/%s, cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
var err error
cachedService.rwlock.Lock()
var reachable bool
defer cachedService.rwlock.Unlock()
_, ok := cachedService.endpointMap[clusterName]
if !ok {
for _, subset := range endpoint.Subsets {
if len(subset.Addresses) > 0 {
reachable = true
break
}
}
if reachable {
// first time get endpoints, update dns record
glog.V(4).Infof("Reachable endpoint was found for %s/%s, cluster %s, building endpointMap", endpoint.Namespace, endpoint.Name, clusterName)
cachedService.endpointMap[clusterName] = 1
for i := 0; i < clientRetryCount; i++ {
err := serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
return err
}
} else {
for _, subset := range endpoint.Subsets {
if len(subset.Addresses) > 0 {
reachable = true
break
}
}
if !reachable {
// reachable endpoints were lost, update dns record
glog.V(4).Infof("Reachable endpoint was lost for %s/%s, cluster %s, deleting endpointMap", endpoint.Namespace, endpoint.Name, clusterName)
delete(cachedService.endpointMap, clusterName)
for i := 0; i < clientRetryCount; i++ {
err := serviceController.ensureDnsRecords(clusterName, cachedService)
if err == nil {
return nil
}
glog.V(4).Infof("Error ensuring DNS Records: %v", err)
time.Sleep(cachedService.nextDNSUpdateDelay())
}
return err
}
}
return nil
}
// obj could be a *v1.Endpoints, or a cache.DeletedFinalStateUnknown marker item.
func (cc *clusterClientCache) enqueueEndpoint(obj interface{}, clusterName string) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
_, ok := cc.clientMap[clusterName]
if ok {
cc.clientMap[clusterName].endpointQueue.Add(key)
}
}
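clusterEndpointWorker above (and clusterServiceWorker later in this commit) start at most one worker goroutine per cluster, and only start a replacement after the previous worker has signalled on the done channel. The following self-contained sketch shows that bookkeeping pattern using a plain channel as a hypothetical stand-in for the workqueue; it is illustrative and not part of the vendored files.

package main

import (
	"fmt"
	"time"
)

// queue is a minimal stand-in for the per-cluster workqueue.
type queue chan string

// runWorkers sketches the bookkeeping in clusterEndpointWorker and
// clusterServiceWorker: drain completion signals non-blockingly, then start a
// worker goroutine only for clusters that do not already have one running.
func runWorkers(queues map[string]queue, workerRunning map[string]bool, doneCh chan string) {
	// Non-blocking drain of the done channel (the labelled ForLoop above).
Drain:
	for {
		select {
		case clusterName := <-doneCh:
			workerRunning[clusterName] = false
		default:
			break Drain
		}
	}
	for clusterName, q := range queues {
		if workerRunning[clusterName] {
			continue
		}
		go func(clusterName string, q queue) {
			for key := range q {
				fmt.Printf("sync %s from cluster %s\n", key, clusterName)
			}
			// Signal that this worker has finished and is going out of scope.
			doneCh <- clusterName
		}(clusterName, q)
		workerRunning[clusterName] = true
	}
}

func main() {
	q := make(queue, 2)
	queues := map[string]queue{"cluster-a": q}
	running := map[string]bool{}
	done := make(chan string, 1)

	runWorkers(queues, running, done)
	q <- "servicenamespace/servicename"
	close(q) // the worker drains the queue, then reports on the done channel
	time.Sleep(50 * time.Millisecond)
	runWorkers(queues, running, done) // sees the done signal and starts a replacement worker
	fmt.Println("worker running:", running["cluster-a"])
}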

@ -0,0 +1,161 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"testing"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
var fakeDns, _ = clouddns.NewFakeInterface() // No need to check for unsupported interfaces, as the fake interface supports everything that's required.
var fakeDnsZones, _ = fakeDns.Zones()
var fakeServiceController = ServiceController{
dns: fakeDns,
dnsZones: fakeDnsZones,
federationName: "fed1",
zoneName: "example.com",
serviceDnsSuffix: "federation.example.com",
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
func buildEndpoint(subsets [][]string) *v1.Endpoints {
endpoint := &v1.Endpoints{
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{}},
},
}
for _, element := range subsets {
address := v1.EndpointAddress{IP: element[0], Hostname: element[1], TargetRef: nil}
endpoint.Subsets[0].Addresses = append(endpoint.Subsets[0].Addresses, address)
}
return endpoint
}
func TestProcessEndpointUpdate(t *testing.T) {
clusterName := "foo"
cc := clusterClientCache{
clientMap: map[string]*clusterCache{
clusterName: {
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
},
},
}
tests := []struct {
name string
cachedService *cachedService
endpoint *v1.Endpoints
clusterName string
expectResult int
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: make(map[string]int),
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
1,
},
{
"has-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: map[string]int{
"foo": 1,
},
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
1,
},
}
fakeServiceController.clusterCache = &cc
for _, test := range tests {
cc.processEndpointUpdate(test.cachedService, test.endpoint, test.clusterName, &fakeServiceController)
if test.expectResult != test.cachedService.endpointMap[test.clusterName] {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName])
}
}
}
func TestProcessEndpointDeletion(t *testing.T) {
clusterName := "foo"
cc := clusterClientCache{
clientMap: map[string]*clusterCache{
clusterName: {
cluster: &v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Zones: []string{"foozone"},
Region: "fooregion",
},
},
},
},
}
tests := []struct {
name string
cachedService *cachedService
endpoint *v1.Endpoints
clusterName string
expectResult int
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: make(map[string]int),
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
0,
},
{
"has-cache",
&cachedService{
lastState: &v1.Service{},
endpointMap: map[string]int{
clusterName: 1,
},
},
buildEndpoint([][]string{{"ip1", ""}}),
clusterName,
0,
},
}
fakeServiceController.clusterCache = &cc
for _, test := range tests {
cc.processEndpointDeletion(test.cachedService, test.clusterName, &fakeServiceController)
if test.expectResult != test.cachedService.endpointMap[test.clusterName] {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName])
}
}
}

@ -0,0 +1,303 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"time"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api/errors"
v1 "k8s.io/kubernetes/pkg/api/v1"
cache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"reflect"
"sort"
"github.com/golang/glog"
)
// clusterServiceWorker runs a worker thread for each cluster that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (sc *ServiceController) clusterServiceWorker() {
// process all pending events in serviceWorkerDoneChan
ForLoop:
for {
select {
case clusterName := <-sc.serviceWorkerDoneChan:
sc.serviceWorkerMap[clusterName] = false
default:
// non-blocking, comes here if all existing events are processed
break ForLoop
}
}
for clusterName, cache := range sc.clusterCache.clientMap {
workerExist, found := sc.serviceWorkerMap[clusterName]
if found && workerExist {
continue
}
// create a worker only if the previous worker has finished and gone out of scope
go func(cache *clusterCache, clusterName string) {
fedClient := sc.federationClient
for {
func() {
key, quit := cache.serviceQueue.Get()
if quit {
// send signal that current worker has finished tasks and is going out of scope
sc.serviceWorkerDoneChan <- clusterName
return
}
defer cache.serviceQueue.Done(key)
err := sc.clusterCache.syncService(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
if err != nil {
glog.Errorf("Failed to sync service: %+v", err)
}
}()
}
}(cache, clusterName)
sc.serviceWorkerMap[clusterName] = true
}
}
// Whenever there is a change on a service, the federation service should be updated
func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient fedclientset.Interface, sc *ServiceController) error {
// obj holds the latest service info from apiserver, return if there is no federation cache for the service
cachedService, ok := serviceCache.get(key)
if !ok {
// if the service is not in serviceCache, it was not created by the federation, so we should skip it
return nil
}
serviceInterface, exists, err := clusterCache.serviceStore.Indexer.GetByKey(key)
if err != nil {
glog.Errorf("Did not successfully get %v from store: %v, will retry later", key, err)
clusterCache.serviceQueue.Add(key)
return err
}
var needUpdate, isDeletion bool
if exists {
service, ok := serviceInterface.(*v1.Service)
if ok {
glog.V(4).Infof("Found service for federation service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
needUpdate = cc.processServiceUpdate(cachedService, service, clusterName)
} else {
_, ok := serviceInterface.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", serviceInterface)
}
glog.Infof("Found tombstone for %v", key)
needUpdate = cc.processServiceDeletion(cachedService, clusterName)
isDeletion = true
}
} else {
glog.Infof("Can not get service %v for cluster %s from serviceStore", key, clusterName)
needUpdate = cc.processServiceDeletion(cachedService, clusterName)
isDeletion = true
}
if needUpdate {
for i := 0; i < clientRetryCount; i++ {
err := sc.ensureDnsRecords(clusterName, cachedService)
if err == nil {
break
}
glog.V(4).Infof("Error ensuring DNS Records for service %s on cluster %s: %v", key, clusterName, err)
time.Sleep(cachedService.nextDNSUpdateDelay())
clusterCache.serviceQueue.Add(key)
// do not return on failure here, as we still want to persist to the federation apiserver even if ensuring dns records fails
}
err := cc.persistFedServiceUpdate(cachedService, fedClient)
if err == nil {
cachedService.appliedState = cachedService.lastState
cachedService.resetFedUpdateDelay()
} else {
if err != nil {
glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
clusterCache.serviceQueue.Add(key)
}
}
}
if isDeletion {
// cachedService is not reliable here as
// deleting cache is the last step of federation service deletion
_, err := fedClient.Core().Services(cachedService.lastState.Namespace).Get(cachedService.lastState.Name)
// rebuild service if federation service still exists
if err == nil || !errors.IsNotFound(err) {
return sc.ensureClusterService(cachedService, clusterName, cachedService.appliedState, clusterCache.clientset)
}
}
return nil
}
// processServiceDeletion is triggered when a service is deleted from the underlying k8s cluster
// the deletion function will wipe out the cached ingress info of the service from the federation service ingress
// the function returns a bool to indicate whether an actual update happened on the federation service cache
// and if the federation service cache is updated, the updated info should be posted to the federation apiserver
func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedService, clusterName string) bool {
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
// cached status found, remove ingress info from federation service cache
if ok {
cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
removeIndexes := []int{}
for i, fed := range cachedFedServiceStatus.Ingress {
for _, new := range cachedStatus.Ingress {
// remove if same ingress record found
if new.IP == fed.IP && new.Hostname == fed.Hostname {
removeIndexes = append(removeIndexes, i)
}
}
}
sort.Ints(removeIndexes)
for i := len(removeIndexes) - 1; i >= 0; i-- {
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
glog.V(4).Infof("Remove old ingress %d for service %s/%s", removeIndexes[i], cachedService.lastState.Namespace, cachedService.lastState.Name)
}
delete(cachedService.serviceStatusMap, clusterName)
delete(cachedService.endpointMap, clusterName)
cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
return true
} else {
glog.V(4).Infof("Service removal %s/%s from cluster %s observed.", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
}
return false
}
// processServiceUpdate updates ingress info when a service is updated
// the function returns a bool to indicate whether an actual update happened on the federation service cache
// and if the federation service cache is updated, the updated info should be posted to the federation apiserver
func (cc *clusterClientCache) processServiceUpdate(cachedService *cachedService, service *v1.Service, clusterName string) bool {
glog.V(4).Infof("Processing service update for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
cachedService.rwlock.Lock()
defer cachedService.rwlock.Unlock()
var needUpdate bool
newServiceLB := service.Status.LoadBalancer
cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
if len(newServiceLB.Ingress) == 0 {
// the LB IP has not been assigned yet
return false
}
cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
if ok {
if reflect.DeepEqual(cachedStatus, newServiceLB) {
glog.V(4).Infof("Same ingress info observed for service %s/%s: %+v ", service.Namespace, service.Name, cachedStatus.Ingress)
} else {
glog.V(4).Infof("Ingress info was changed for service %s/%s: cache: %+v, new: %+v ",
service.Namespace, service.Name, cachedStatus.Ingress, newServiceLB)
needUpdate = true
}
} else {
glog.V(4).Infof("Cached service status was not found for %s/%s, cluster %s, building one", service.Namespace, service.Name, clusterName)
// cache is not always reliable (cache will be cleaned when the service controller restarts)
// two cases will run into this branch:
// 1. new service loadbalancer info received -> no info in cache, and none in the federation service
// 2. service controller restarted -> no info in cache, but it is in the federation service
// check if the lb info is already in federation service
cachedService.serviceStatusMap[clusterName] = newServiceLB
needUpdate = false
// iterate service ingress info
for _, new := range newServiceLB.Ingress {
var found bool
// if it is known by federation service
for _, fed := range cachedFedServiceStatus.Ingress {
if new.IP == fed.IP && new.Hostname == fed.Hostname {
found = true
break
}
}
if !found {
needUpdate = true
break
}
}
}
if needUpdate {
// new status = cached federation status - cached status + new status from k8s cluster
removeIndexes := []int{}
for i, fed := range cachedFedServiceStatus.Ingress {
for _, new := range cachedStatus.Ingress {
// remove if same ingress record found
if new.IP == fed.IP && new.Hostname == fed.Hostname {
removeIndexes = append(removeIndexes, i)
}
}
}
sort.Ints(removeIndexes)
for i := len(removeIndexes) - 1; i >= 0; i-- {
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
}
cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress, service.Status.LoadBalancer.Ingress...)
cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
glog.V(4).Infof("Add new ingress info %+v for service %s/%s", service.Status.LoadBalancer, service.Namespace, service.Name)
} else {
glog.V(4).Infof("Same ingress info found for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
}
return needUpdate
}
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient fedclientset.Interface) error {
service := cachedService.lastState
glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
var err error
for i := 0; i < clientRetryCount; i++ {
_, err := fedClient.Core().Services(service.Namespace).Get(service.Name)
if errors.IsNotFound(err) {
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
service.Namespace, service.Name, err)
return nil
}
_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
if err == nil {
glog.V(2).Infof("Successfully update service %s/%s to federation apiserver", service.Namespace, service.Name)
return nil
}
if errors.IsNotFound(err) {
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
service.Namespace, service.Name, err)
return nil
}
if errors.IsConflict(err) {
glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
service.Namespace, service.Name, err)
return err
}
time.Sleep(cachedService.nextFedUpdateDelay())
}
return err
}
// obj could be a *v1.Service, or a cache.DeletedFinalStateUnknown marker item.
func (cc *clusterClientCache) enqueueService(obj interface{}, clusterName string) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
_, ok := cc.clientMap[clusterName]
if ok {
cc.clientMap[clusterName].serviceQueue.Add(key)
}
}
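processServiceUpdate and processServiceDeletion above drop matching LoadBalancer ingress entries by collecting removeIndexes, sorting them, and deleting from the slice in descending order, so that earlier deletions do not shift the positions still to be removed. Below is a stand-alone sketch of that slice-removal pattern; the names are illustrative, not from the vendored file.

package main

import (
	"fmt"
	"sort"
)

// removeIndexes deletes the given positions from a slice. Deleting in
// descending order keeps the remaining indexes valid, which is why
// processServiceUpdate/processServiceDeletion sort removeIndexes and then
// iterate from the end when trimming the Ingress slice.
func removeIndexes(items []string, indexes []int) []string {
	sort.Ints(indexes)
	for i := len(indexes) - 1; i >= 0; i-- {
		idx := indexes[i]
		items = append(items[:idx], items[idx+1:]...)
	}
	return items
}

func main() {
	ingress := []string{"ip1", "ip2", "ip3", "ip4"}
	// Remove the entries at positions 1 and 3 ("ip2" and "ip4").
	fmt.Println(removeIndexes(ingress, []int{1, 3})) // [ip1 ip3]
}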

@ -0,0 +1,162 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
)
func buildServiceStatus(ingresses [][]string) v1.LoadBalancerStatus {
status := v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{},
}
for _, element := range ingresses {
ingress := v1.LoadBalancerIngress{IP: element[0], Hostname: element[1]}
status.Ingress = append(status.Ingress, ingress)
}
return status
}
func TestProcessServiceUpdate(t *testing.T) {
cc := clusterClientCache{
clientMap: make(map[string]*clusterCache),
}
tests := []struct {
name string
cachedService *cachedService
service *v1.Service
clusterName string
expectNeedUpdate bool
expectStatus v1.LoadBalancerStatus
}{
{
"no-cache",
&cachedService{
lastState: &v1.Service{},
serviceStatusMap: make(map[string]v1.LoadBalancerStatus),
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo",
true,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"same-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
false,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"diff-cluster",
&cachedService{
lastState: &v1.Service{
ObjectMeta: v1.ObjectMeta{Name: "bar1"},
},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo2": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip1", ""}}),
},
{
"diff-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}}),
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}}),
},
}
for _, test := range tests {
result := cc.processServiceUpdate(test.cachedService, test.service, test.clusterName)
if test.expectNeedUpdate != result {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result)
}
if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer)
}
}
}
func TestProcessServiceDeletion(t *testing.T) {
cc := clusterClientCache{
clientMap: make(map[string]*clusterCache),
}
tests := []struct {
name string
cachedService *cachedService
service *v1.Service
clusterName string
expectNeedUpdate bool
expectStatus v1.LoadBalancerStatus
}{
{
"same-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{}),
},
{
"diff-ingress",
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}, {"ip3", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}}),
"foo2": buildServiceStatus([][]string{{"ip5", ""}, {"ip6", ""}, {"ip8", ""}}),
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}})}},
"foo1",
true,
buildServiceStatus([][]string{{"ip4", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}}),
},
}
for _, test := range tests {
result := cc.processServiceDeletion(test.cachedService, test.clusterName)
if test.expectNeedUpdate != result {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result)
}
if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) {
t.Errorf("Test failed for %s, expected %+v, saw %+v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer)
}
}
}

File diff suppressed because it is too large.

@ -0,0 +1,85 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"sync"
"testing"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns" // Only for unit testing purposes.
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestGetClusterConditionPredicate(t *testing.T) {
fakedns, _ := clouddns.NewFakeInterface() // No need to check for unsupported interfaces, as the fake interface supports everything that's required.
serviceController := ServiceController{
dns: fakedns,
serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)},
clusterCache: &clusterClientCache{
rwlock: sync.Mutex{},
clientMap: make(map[string]*clusterCache),
},
knownClusterSet: make(sets.String),
}
tests := []struct {
cluster v1beta1.Cluster
expectAccept bool
name string
serviceController *ServiceController
}{
{
cluster: v1beta1.Cluster{},
expectAccept: false,
name: "empty",
serviceController: &serviceController,
},
{
cluster: v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Conditions: []v1beta1.ClusterCondition{
{Type: v1beta1.ClusterReady, Status: v1.ConditionTrue},
},
},
},
expectAccept: true,
name: "basic",
serviceController: &serviceController,
},
{
cluster: v1beta1.Cluster{
Status: v1beta1.ClusterStatus{
Conditions: []v1beta1.ClusterCondition{
{Type: v1beta1.ClusterReady, Status: v1.ConditionFalse},
},
},
},
expectAccept: false,
name: "notready",
serviceController: &serviceController,
},
}
pred := getClusterConditionPredicate()
for _, test := range tests {
accept := pred(test.cluster)
if accept != test.expectAccept {
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectAccept, accept)
}
}
}
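getClusterConditionPredicate itself is defined in servicecontroller.go (whose diff is suppressed above), but the three test cases here pin down its expected behaviour: a cluster is accepted only when it carries a Ready condition with status True. The sketch below illustrates that check under this assumption, using local stand-in types rather than the real v1beta1 API.

package main

import "fmt"

// Local stand-ins for v1beta1.ClusterCondition / ClusterStatus; the real
// predicate lives in servicecontroller.go (diff suppressed above).
type condition struct {
	Type   string
	Status string
}
type cluster struct {
	Conditions []condition
}

// ready mirrors what the test cases above imply getClusterConditionPredicate
// checks: accept the cluster only if a Ready condition with status True is present.
func ready(c cluster) bool {
	for _, cond := range c.Conditions {
		if cond.Type == "Ready" && cond.Status == "True" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(ready(cluster{}))                                            // false: "empty"
	fmt.Println(ready(cluster{Conditions: []condition{{"Ready", "True"}}}))  // true:  "basic"
	fmt.Println(ready(cluster{Conditions: []condition{{"Ready", "False"}}})) // false: "notready"
}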