Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18,880 changed files with 5,166,045 additions and 1 deletion


@@ -0,0 +1,72 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"aws.go",
"aws_instancegroups.go",
"aws_loadbalancer.go",
"aws_routes.go",
"aws_utils.go",
"log_handler.go",
"retry_handler.go",
"sets_ippermissions.go",
"volumes.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/credentialprovider/aws:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/volume:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/aws/awserr",
"//vendor:github.com/aws/aws-sdk-go/aws/credentials",
"//vendor:github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"//vendor:github.com/aws/aws-sdk-go/aws/ec2metadata",
"//vendor:github.com/aws/aws-sdk-go/aws/request",
"//vendor:github.com/aws/aws-sdk-go/aws/session",
"//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
"//vendor:github.com/aws/aws-sdk-go/service/ec2",
"//vendor:github.com/aws/aws-sdk-go/service/elb",
"//vendor:github.com/golang/glog",
"//vendor:gopkg.in/gcfg.v1",
],
)
go_test(
name = "go_default_test",
srcs = [
"aws_test.go",
"retry_handler_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/sets:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
"//vendor:github.com/aws/aws-sdk-go/service/ec2",
"//vendor:github.com/aws/aws-sdk-go/service/elb",
"//vendor:github.com/golang/glog",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:github.com/stretchr/testify/mock",
],
)


@@ -0,0 +1,2 @@
assignees:
- justinsb

File diff suppressed because it is too large


@@ -0,0 +1,90 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/golang/glog"
)
// AWSCloud implements InstanceGroups
var _ InstanceGroups = &Cloud{}
// ResizeInstanceGroup sets the size of the specified instancegroup. Exported
// so it can be used by the e2e tests, which don't want to instantiate a full
// cloudprovider.
func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error {
request := &autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(instanceGroupName),
MinSize: aws.Int64(int64(size)),
MaxSize: aws.Int64(int64(size)),
}
if _, err := asg.UpdateAutoScalingGroup(request); err != nil {
return fmt.Errorf("error resizing AWS autoscaling group: %v", err)
}
return nil
}
// Implement InstanceGroups.ResizeInstanceGroup
// Set the size to the fixed size
func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error {
return ResizeInstanceGroup(c.asg, instanceGroupName, size)
}
// DescribeInstanceGroup gets info about the specified instancegroup
// Exported so it can be used by the e2e tests,
// which don't want to instantiate a full cloudprovider.
func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo, error) {
request := &autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{aws.String(instanceGroupName)},
}
response, err := asg.DescribeAutoScalingGroups(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS autoscaling group (%s): %v", instanceGroupName, err)
}
if len(response.AutoScalingGroups) == 0 {
return nil, nil
}
if len(response.AutoScalingGroups) > 1 {
glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName)
}
group := response.AutoScalingGroups[0]
return &awsInstanceGroup{group: group}, nil
}
// Implement InstanceGroups.DescribeInstanceGroup
// Queries the cloud provider for information about the specified instance group
func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) {
return DescribeInstanceGroup(c.asg, instanceGroupName)
}
// awsInstanceGroup implements InstanceGroupInfo
var _ InstanceGroupInfo = &awsInstanceGroup{}
type awsInstanceGroup struct {
group *autoscaling.Group
}
// Implement InstanceGroupInfo.CurrentSize
// The number of instances currently running under control of this group
func (g *awsInstanceGroup) CurrentSize() (int, error) {
return len(g.group.Instances), nil
}
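ResizeInstanceGroup and DescribeInstanceGroup deliberately take the narrow ASG interface (defined elsewhere in this package, in one of the diffs suppressed below) rather than the full Cloud, so a caller such as an e2e test can hand in the plain SDK client. A minimal sketch of such a caller — not part of this commit; the region and group name are made up, and it assumes *autoscaling.AutoScaling satisfies ASG:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// resizeNodeGroupExample pins a hypothetical autoscaling group to exactly three instances.
func resizeNodeGroupExample() error {
	sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
	return ResizeInstanceGroup(autoscaling.New(sess), "k8s-nodes-example", 3)
}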


@@ -0,0 +1,475 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"fmt"
"reflect"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
)
const ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled"
func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool, loadBalancerAttributes *elb.LoadBalancerAttributes) (*elb.LoadBalancerDescription, error) {
loadBalancer, err := c.describeLoadBalancer(loadBalancerName)
if err != nil {
return nil, err
}
dirty := false
if loadBalancer == nil {
createRequest := &elb.CreateLoadBalancerInput{}
createRequest.LoadBalancerName = aws.String(loadBalancerName)
createRequest.Listeners = listeners
if internalELB {
createRequest.Scheme = aws.String("internal")
}
// We are supposed to specify one subnet per AZ.
// TODO: What happens if we have more than one subnet per AZ?
createRequest.Subnets = stringPointerArray(subnetIDs)
createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)
createRequest.Tags = []*elb.Tag{
{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(c.getClusterName())},
{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
}
glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
_, err := c.elb.CreateLoadBalancer(createRequest)
if err != nil {
return nil, err
}
if proxyProtocol {
err = c.createProxyProtocolPolicy(loadBalancerName)
if err != nil {
return nil, err
}
for _, listener := range listeners {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
if err != nil {
return nil, err
}
}
}
dirty = true
} else {
// TODO: Sync internal vs non-internal
{
// Sync subnets
expected := sets.NewString(subnetIDs...)
actual := stringSetFromPointers(loadBalancer.Subnets)
additions := expected.Difference(actual)
removals := actual.Difference(expected)
if removals.Len() != 0 {
request := &elb.DetachLoadBalancerFromSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(removals)
glog.V(2).Info("Detaching load balancer from removed subnets")
_, err := c.elb.DetachLoadBalancerFromSubnets(request)
if err != nil {
return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
}
dirty = true
}
if additions.Len() != 0 {
request := &elb.AttachLoadBalancerToSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(additions)
glog.V(2).Info("Attaching load balancer to added subnets")
_, err := c.elb.AttachLoadBalancerToSubnets(request)
if err != nil {
return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
}
dirty = true
}
}
{
// Sync security groups
expected := sets.NewString(securityGroupIDs...)
actual := stringSetFromPointers(loadBalancer.SecurityGroups)
if !expected.Equal(actual) {
// This call just replaces the security groups, unlike e.g. subnets (!)
request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.SecurityGroups = stringPointerArray(securityGroupIDs)
glog.V(2).Info("Applying updated security groups to load balancer")
_, err := c.elb.ApplySecurityGroupsToLoadBalancer(request)
if err != nil {
return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
}
dirty = true
}
}
{
// Sync listeners
listenerDescriptions := loadBalancer.ListenerDescriptions
foundSet := make(map[int]bool)
removals := []*int64{}
for _, listenerDescription := range listenerDescriptions {
actual := listenerDescription.Listener
if actual == nil {
glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
continue
}
found := -1
for i, expected := range listeners {
if orEmpty(actual.Protocol) != orEmpty(expected.Protocol) {
continue
}
if orEmpty(actual.InstanceProtocol) != orEmpty(expected.InstanceProtocol) {
continue
}
if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
continue
}
if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
continue
}
if orEmpty(actual.SSLCertificateId) != orEmpty(expected.SSLCertificateId) {
continue
}
found = i
}
if found != -1 {
foundSet[found] = true
} else {
removals = append(removals, actual.LoadBalancerPort)
}
}
additions := []*elb.Listener{}
for i := range listeners {
if foundSet[i] {
continue
}
additions = append(additions, listeners[i])
}
if len(removals) != 0 {
request := &elb.DeleteLoadBalancerListenersInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.LoadBalancerPorts = removals
glog.V(2).Info("Deleting removed load balancer listeners")
_, err := c.elb.DeleteLoadBalancerListeners(request)
if err != nil {
return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err)
}
dirty = true
}
if len(additions) != 0 {
request := &elb.CreateLoadBalancerListenersInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Listeners = additions
glog.V(2).Info("Creating added load balancer listeners")
_, err := c.elb.CreateLoadBalancerListeners(request)
if err != nil {
return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err)
}
dirty = true
}
}
{
// Sync proxy protocol state for new and existing listeners
proxyPolicies := make([]*string, 0)
if proxyProtocol {
// Ensure the backend policy exists
// NOTE The documentation for the AWS API indicates we could get an HTTP 400
// back if a policy of the same name already exists. However, the aws-sdk does not
// seem to return an error to us in these cases. Therefore, this will issue an API
// request every time.
err := c.createProxyProtocolPolicy(loadBalancerName)
if err != nil {
return nil, err
}
proxyPolicies = append(proxyPolicies, aws.String(ProxyProtocolPolicyName))
}
foundBackends := make(map[int64]bool)
proxyProtocolBackends := make(map[int64]bool)
for _, backendListener := range loadBalancer.BackendServerDescriptions {
foundBackends[*backendListener.InstancePort] = false
proxyProtocolBackends[*backendListener.InstancePort] = proxyProtocolEnabled(backendListener)
}
for _, listener := range listeners {
setPolicy := false
instancePort := *listener.InstancePort
if currentState, ok := proxyProtocolBackends[instancePort]; !ok {
// This is a new ELB backend so we only need to worry about
// potentially adding a policy and not removing an
// existing one
setPolicy = proxyProtocol
} else {
foundBackends[instancePort] = true
// This is an existing ELB backend so we need to determine
// if the state changed
setPolicy = (currentState != proxyProtocol)
}
if setPolicy {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies)
if err != nil {
return nil, err
}
dirty = true
}
}
// We now need to figure out if any backend policies need to be removed
// because these old policies will stick around even if there is no
// corresponding listener anymore
for instancePort, found := range foundBackends {
if !found {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{})
if err != nil {
return nil, err
}
dirty = true
}
}
}
}
// Whether the ELB was new or existing, sync attributes regardless. This accounts for things
// that cannot be specified at the time of creation and can only be modified after the fact,
// e.g. idle connection timeout.
{
describeAttributesRequest := &elb.DescribeLoadBalancerAttributesInput{}
describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest)
if err != nil {
glog.Warning("Unable to retrieve load balancer attributes during attribute sync")
return nil, err
}
foundAttributes := &describeAttributesOutput.LoadBalancerAttributes
// Update attributes if they're dirty
if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) {
glog.V(2).Info("Updating load-balancer attributes for %q", loadBalancerName)
modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{}
modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
modifyAttributesRequest.LoadBalancerAttributes = loadBalancerAttributes
_, err = c.elb.ModifyLoadBalancerAttributes(modifyAttributesRequest)
if err != nil {
return nil, fmt.Errorf("Unable to update load balancer attributes during attribute sync: %v", err)
}
dirty = true
}
}
if dirty {
loadBalancer, err = c.describeLoadBalancer(loadBalancerName)
if err != nil {
glog.Warning("Unable to retrieve load balancer after creation/update")
return nil, err
}
}
return loadBalancer, nil
}
// Makes sure that the health check for an ELB matches the configured listeners
func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error {
actual := loadBalancer.HealthCheck
// Default AWS settings
expectedHealthyThreshold := int64(2)
expectedUnhealthyThreshold := int64(6)
expectedTimeout := int64(5)
expectedInterval := int64(10)
// We only configure a TCP health-check on the first port
expectedTarget := ""
for _, listener := range listeners {
if listener.InstancePort == nil {
continue
}
expectedTarget = "TCP:" + strconv.FormatInt(*listener.InstancePort, 10)
break
}
if expectedTarget == "" {
return fmt.Errorf("unable to determine health check port (no valid listeners)")
}
if expectedTarget == orEmpty(actual.Target) &&
expectedHealthyThreshold == orZero(actual.HealthyThreshold) &&
expectedUnhealthyThreshold == orZero(actual.UnhealthyThreshold) &&
expectedTimeout == orZero(actual.Timeout) &&
expectedInterval == orZero(actual.Interval) {
return nil
}
glog.V(2).Info("Updating load-balancer health-check")
healthCheck := &elb.HealthCheck{}
healthCheck.HealthyThreshold = &expectedHealthyThreshold
healthCheck.UnhealthyThreshold = &expectedUnhealthyThreshold
healthCheck.Timeout = &expectedTimeout
healthCheck.Interval = &expectedInterval
healthCheck.Target = &expectedTarget
request := &elb.ConfigureHealthCheckInput{}
request.HealthCheck = healthCheck
request.LoadBalancerName = loadBalancer.LoadBalancerName
_, err := c.elb.ConfigureHealthCheck(request)
if err != nil {
return fmt.Errorf("error configuring load-balancer health-check: %v", err)
}
return nil
}
// Makes sure that exactly the specified hosts are registered as instances with the load balancer
func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error {
expected := sets.NewString()
for _, instance := range instances {
expected.Insert(orEmpty(instance.InstanceId))
}
actual := sets.NewString()
for _, lbInstance := range lbInstances {
actual.Insert(orEmpty(lbInstance.InstanceId))
}
additions := expected.Difference(actual)
removals := actual.Difference(expected)
addInstances := []*elb.Instance{}
for _, instanceId := range additions.List() {
addInstance := &elb.Instance{}
addInstance.InstanceId = aws.String(instanceId)
addInstances = append(addInstances, addInstance)
}
removeInstances := []*elb.Instance{}
for _, instanceId := range removals.List() {
removeInstance := &elb.Instance{}
removeInstance.InstanceId = aws.String(instanceId)
removeInstances = append(removeInstances, removeInstance)
}
if len(addInstances) > 0 {
registerRequest := &elb.RegisterInstancesWithLoadBalancerInput{}
registerRequest.Instances = addInstances
registerRequest.LoadBalancerName = aws.String(loadBalancerName)
_, err := c.elb.RegisterInstancesWithLoadBalancer(registerRequest)
if err != nil {
return err
}
glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
}
if len(removeInstances) > 0 {
deregisterRequest := &elb.DeregisterInstancesFromLoadBalancerInput{}
deregisterRequest.Instances = removeInstances
deregisterRequest.LoadBalancerName = aws.String(loadBalancerName)
_, err := c.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest)
if err != nil {
return err
}
glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
}
return nil
}
func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error {
request := &elb.CreateLoadBalancerPolicyInput{
LoadBalancerName: aws.String(loadBalancerName),
PolicyName: aws.String(ProxyProtocolPolicyName),
PolicyTypeName: aws.String("ProxyProtocolPolicyType"),
PolicyAttributes: []*elb.PolicyAttribute{
{
AttributeName: aws.String("ProxyProtocol"),
AttributeValue: aws.String("true"),
},
},
}
glog.V(2).Info("Creating proxy protocol policy on load balancer")
_, err := c.elb.CreateLoadBalancerPolicy(request)
if err != nil {
return fmt.Errorf("error creating proxy protocol policy on load balancer: %v", err)
}
return nil
}
func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, policies []*string) error {
request := &elb.SetLoadBalancerPoliciesForBackendServerInput{
InstancePort: aws.Int64(instancePort),
LoadBalancerName: aws.String(loadBalancerName),
PolicyNames: policies,
}
if len(policies) > 0 {
glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort)
} else {
glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort)
}
_, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request)
if err != nil {
return fmt.Errorf("error adjusting AWS loadbalancer backend policies: %v", err)
}
return nil
}
func proxyProtocolEnabled(backend *elb.BackendServerDescription) bool {
for _, policy := range backend.PolicyNames {
if aws.StringValue(policy) == ProxyProtocolPolicyName {
return true
}
}
return false
}
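For context, ensureLoadBalancer expects its listeners already assembled by the caller (which lives in a file whose diff is suppressed in this view). A hedged sketch of what one such listener might look like; the ports are illustrative:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elb"
)

// exampleListeners builds a single TCP listener mapping ELB port 80 to node port 31234.
func exampleListeners() []*elb.Listener {
	return []*elb.Listener{
		{
			Protocol:         aws.String("tcp"),
			InstanceProtocol: aws.String("tcp"),
			LoadBalancerPort: aws.Int64(80),
			InstancePort:     aws.Int64(31234),
		},
	}
}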


@@ -0,0 +1,188 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
)
func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
// This should be unnecessary (we already filter on TagNameKubernetesCluster,
// and something is broken if the cluster name doesn't match), but anyway...
// TODO: All clouds should be cluster-aware by default
filters := []*ec2.Filter{newEc2Filter("tag:"+TagNameKubernetesCluster, clusterName)}
request := &ec2.DescribeRouteTablesInput{Filters: c.addFilters(filters)}
tables, err := c.ec2.DescribeRouteTables(request)
if err != nil {
return nil, err
}
if len(tables) == 0 {
return nil, fmt.Errorf("unable to find route table for AWS cluster: %s", clusterName)
}
if len(tables) != 1 {
return nil, fmt.Errorf("found multiple matching AWS route tables for AWS cluster: %s", clusterName)
}
return tables[0], nil
}
// ListRoutes implements Routes.ListRoutes
// List all routes that match the filter
func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
table, err := c.findRouteTable(clusterName)
if err != nil {
return nil, err
}
var routes []*cloudprovider.Route
var instanceIDs []*string
for _, r := range table.Routes {
instanceID := orEmpty(r.InstanceId)
if instanceID == "" {
continue
}
instanceIDs = append(instanceIDs, &instanceID)
}
instances, err := c.getInstancesByIDs(instanceIDs)
if err != nil {
return nil, err
}
for _, r := range table.Routes {
instanceID := orEmpty(r.InstanceId)
destinationCIDR := orEmpty(r.DestinationCidrBlock)
if instanceID == "" || destinationCIDR == "" {
continue
}
instance, found := instances[instanceID]
if !found {
glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
continue
}
nodeName := mapInstanceToNodeName(instance)
routeName := clusterName + "-" + destinationCIDR
routes = append(routes, &cloudprovider.Route{Name: routeName, TargetNode: nodeName, DestinationCIDR: destinationCIDR})
}
return routes, nil
}
// Sets the instance attribute "source-dest-check" to the specified value
func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCheck bool) error {
request := &ec2.ModifyInstanceAttributeInput{}
request.InstanceId = aws.String(instanceID)
request.SourceDestCheck = &ec2.AttributeBooleanValue{Value: aws.Bool(sourceDestCheck)}
_, err := c.ec2.ModifyInstanceAttribute(request)
if err != nil {
return fmt.Errorf("error configuring source-dest-check on instance %s: %v", instanceID, err)
}
return nil
}
// CreateRoute implements Routes.CreateRoute
// Create the described route
func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
instance, err := c.getInstanceByNodeName(route.TargetNode)
if err != nil {
return err
}
// In addition to configuring the route itself, we also need to configure the instance to accept that traffic
// On AWS, this requires turning source-dest checks off
err = c.configureInstanceSourceDestCheck(orEmpty(instance.InstanceId), false)
if err != nil {
return err
}
table, err := c.findRouteTable(clusterName)
if err != nil {
return err
}
var deleteRoute *ec2.Route
for _, r := range table.Routes {
destinationCIDR := aws.StringValue(r.DestinationCidrBlock)
if destinationCIDR != route.DestinationCIDR {
continue
}
if aws.StringValue(r.State) == ec2.RouteStateBlackhole {
deleteRoute = r
}
}
if deleteRoute != nil {
glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))
request := &ec2.DeleteRouteInput{}
request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock
request.RouteTableId = table.RouteTableId
_, err = c.ec2.DeleteRoute(request)
if err != nil {
return fmt.Errorf("error deleting blackholed AWS route (%s): %v", aws.StringValue(deleteRoute.DestinationCidrBlock), err)
}
}
request := &ec2.CreateRouteInput{}
// TODO: use ClientToken for idempotency?
request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
request.InstanceId = instance.InstanceId
request.RouteTableId = table.RouteTableId
_, err = c.ec2.CreateRoute(request)
if err != nil {
return fmt.Errorf("error creating AWS route (%s): %v", route.DestinationCIDR, err)
}
return nil
}
// DeleteRoute implements Routes.DeleteRoute
// Delete the specified route
func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
table, err := c.findRouteTable(clusterName)
if err != nil {
return err
}
request := &ec2.DeleteRouteInput{}
request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
request.RouteTableId = table.RouteTableId
_, err = c.ec2.DeleteRoute(request)
if err != nil {
return fmt.Errorf("error deleting AWS route (%s): %v", route.DestinationCIDR, err)
}
return nil
}
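CreateRoute and DeleteRoute are driven by Kubernetes' route controller, which is outside this diff. A sketch of the shape of one such request, with made-up cluster, node, and CIDR values:

package aws

import (
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/types"
)

// exampleCreateRoute routes a hypothetical pod CIDR to a hypothetical node.
func exampleCreateRoute(c *Cloud) error {
	route := &cloudprovider.Route{
		Name:            "example-cluster-10.244.1.0/24",
		TargetNode:      types.NodeName("ip-10-0-0-12.us-west-2.compute.internal"),
		DestinationCIDR: "10.244.1.0/24",
	}
	return c.CreateRoute("example-cluster", "", route)
}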

File diff suppressed because it is too large


@@ -0,0 +1,50 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"k8s.io/kubernetes/pkg/util/sets"
)
func stringSetToPointers(in sets.String) []*string {
if in == nil {
return nil
}
out := make([]*string, 0, len(in))
for k := range in {
out = append(out, aws.String(k))
}
return out
}
func stringSetFromPointers(in []*string) sets.String {
if in == nil {
return nil
}
out := sets.NewString()
for i := range in {
out.Insert(orEmpty(in[i]))
}
return out
}
// orZero returns the value, or 0 if the pointer is nil
// Deprecated: prefer aws.Int64Value
func orZero(v *int64) int64 {
return aws.Int64Value(v)
}
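A quick illustration (values made up) of the round trip these helpers give between sets.String and the []*string slices that AWS request fields expect:

package aws

import "k8s.io/kubernetes/pkg/util/sets"

// exampleRoundTrip converts a string set to AWS-style string pointers and back again.
func exampleRoundTrip() sets.String {
	ids := sets.NewString("subnet-11111111", "subnet-22222222")
	ptrs := stringSetToPointers(ids) // []*string suitable for request fields
	return stringSetFromPointers(ptrs)
}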


@@ -0,0 +1,34 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/golang/glog"
)
// Handler for aws-sdk-go that logs all requests
func awsHandlerLogger(req *request.Request) {
service := req.ClientInfo.ServiceName
name := "?"
if req.Operation != nil {
name = req.Operation.Name
}
glog.V(4).Infof("AWS request: %s %s", service, name)
}
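awsHandlerLogger only does something once it is pushed onto the SDK's request handler chain; the real registration lives in a suppressed part of this diff. A sketch of what that wiring could look like, assuming the aws-sdk-go session/handler API — the handler name and region are illustrative:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// attachLoggerExample registers the logger so every signed request is logged at V(4).
func attachLoggerExample() *session.Session {
	sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
	sess.Handlers.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})
	return sess
}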


@@ -0,0 +1,161 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/golang/glog"
)
const (
decayIntervalSeconds = 20
decayFraction = 0.8
maxDelay = 60 * time.Second
)
// CrossRequestRetryDelay inserts delays before AWS calls, when we are observing RequestLimitExceeded errors
// Note that we share a CrossRequestRetryDelay across multiple AWS requests; this is a process-wide back-off,
// whereas the aws-sdk-go implements a per-request exponential backoff/retry
type CrossRequestRetryDelay struct {
backoff Backoff
}
// Create a new CrossRequestRetryDelay
func NewCrossRequestRetryDelay() *CrossRequestRetryDelay {
c := &CrossRequestRetryDelay{}
c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay)
return c
}
// Added to the Sign chain; called before each request
func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
now := time.Now()
delay := c.backoff.ComputeDelayForRequest(now)
if delay > 0 {
glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
describeRequest(r), delay.String())
r.Config.SleepDelay(delay)
// Avoid clock skew problems
r.Time = now
}
}
// Return a user-friendly string describing the request, for use in log messages
func describeRequest(r *request.Request) string {
service := r.ClientInfo.ServiceName
name := "?"
if r.Operation != nil {
name = r.Operation.Name
}
return service + "::" + name
}
// Added to the AfterRetry chain; called after any error
func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
if r.Error == nil {
return
}
awsError, ok := r.Error.(awserr.Error)
if !ok {
return
}
if awsError.Code() == "RequestLimitExceeded" {
c.backoff.ReportError()
glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
describeRequest(r))
}
}
// Backoff manages a backoff that varies based on the recently observed failures
type Backoff struct {
decayIntervalSeconds int64
decayFraction float64
maxDelay time.Duration
mutex sync.Mutex
// We count all requests & the number of requests which hit a
// RequestLimit. We only really care about 'recent' requests, so we
// decay the counts exponentially to bias towards recent values.
countErrorsRequestLimit float32
countRequests float32
lastDecay int64
}
func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay time.Duration) {
b.lastDecay = time.Now().Unix()
// Bias so that if the first request hits the limit we don't immediately apply the full delay
b.countRequests = 4
b.decayIntervalSeconds = int64(decayIntervalSeconds)
b.decayFraction = decayFraction
b.maxDelay = maxDelay
}
// Computes the delay required for a request, also updating internal state to count this request
func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration {
b.mutex.Lock()
defer b.mutex.Unlock()
// Apply exponential decay to the counters
timeDeltaSeconds := now.Unix() - b.lastDecay
if timeDeltaSeconds > b.decayIntervalSeconds {
intervals := float64(timeDeltaSeconds) / float64(b.decayIntervalSeconds)
decay := float32(math.Pow(b.decayFraction, intervals))
b.countErrorsRequestLimit *= decay
b.countRequests *= decay
b.lastDecay = now.Unix()
}
// Count this request
b.countRequests += 1.0
// Compute the failure rate
errorFraction := float32(0.0)
if b.countRequests > 0.5 {
// Avoid tiny residuals & rounding errors
errorFraction = b.countErrorsRequestLimit / b.countRequests
}
// Ignore a low fraction of errors
// This also allows them to time-out
if errorFraction < 0.1 {
return time.Duration(0)
}
// Delay by the max delay multiplied by the recent error rate
// (i.e. we apply a linear delay function)
// TODO: This is pretty arbitrary
delay := time.Nanosecond * time.Duration(float32(b.maxDelay.Nanoseconds())*errorFraction)
// Round down to the nearest second for sanity
return time.Second * time.Duration(int(delay.Seconds()))
}
// Called when we observe a throttling error
func (b *Backoff) ReportError() {
b.mutex.Lock()
defer b.mutex.Unlock()
b.countErrorsRequestLimit += 1.0
}
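Because the delay is maxDelay scaled by the recent error fraction, an error rate of 0.25 combined with the 60-second maxDelay above works out to roughly a 15-second pause. The delayer only takes effect once BeforeSign and AfterRetry are registered on the SDK handler chains, which happens elsewhere in this commit; a hedged sketch of that wiring, with hypothetical handler names:

package aws

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// attachRetryDelayExample wires one shared delayer into a session's Sign and Retry chains.
func attachRetryDelayExample(sess *session.Session) {
	delayer := NewCrossRequestRetryDelay()
	sess.Handlers.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/delay-presign",
		Fn:   delayer.BeforeSign,
	})
	sess.Handlers.Retry.PushFrontNamed(request.NamedHandler{
		Name: "k8s/delay-afterretry",
		Fn:   delayer.AfterRetry,
	})
}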


@@ -0,0 +1,135 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"testing"
"time"
)
// There follows a group of tests for the backoff logic. There's nothing
// particularly special about the values chosen: if we tweak the values in the
// backoff logic then we might well have to update the tests. However the key
// behavioural elements should remain (e.g. no errors => no backoff), and these
// are each tested by one of the tests below.
// Test that we don't apply any delays when there are no errors
func TestBackoffNoErrors(t *testing.T) {
b := &Backoff{}
b.init(decayIntervalSeconds, decayFraction, maxDelay)
now := time.Now()
for i := 0; i < 100; i++ {
d := b.ComputeDelayForRequest(now)
if d.Nanoseconds() != 0 {
t.Fatalf("unexpected delay during no-error case")
}
now = now.Add(time.Second)
}
}
// Test that we always apply a delay when there are errors, and also that we
// don't "flap" - that our own delay doesn't cause us to oscillate between
// delay and no-delay.
func TestBackoffAllErrors(t *testing.T) {
b := &Backoff{}
b.init(decayIntervalSeconds, decayFraction, maxDelay)
now := time.Now()
// Warm up
for i := 0; i < 10; i++ {
_ = b.ComputeDelayForRequest(now)
b.ReportError()
now = now.Add(time.Second)
}
for i := 0; i < 100; i++ {
d := b.ComputeDelayForRequest(now)
b.ReportError()
if d.Seconds() < 5 {
t.Fatalf("unexpected short-delay during all-error case: %v", d)
}
t.Logf("delay @%d %v", i, d)
now = now.Add(d)
}
}
// Test that we do come close to our max delay, when we see all errors at 1
// second intervals (this simulates multiple concurrent requests, because we
// don't wait for delay in between requests)
func TestBackoffHitsMax(t *testing.T) {
b := &Backoff{}
b.init(decayIntervalSeconds, decayFraction, maxDelay)
now := time.Now()
for i := 0; i < 100; i++ {
_ = b.ComputeDelayForRequest(now)
b.ReportError()
now = now.Add(time.Second)
}
for i := 0; i < 10; i++ {
d := b.ComputeDelayForRequest(now)
b.ReportError()
if float32(d.Nanoseconds()) < (float32(maxDelay.Nanoseconds()) * 0.95) {
t.Fatalf("expected delay to be >= 95 percent of max delay, was %v", d)
}
t.Logf("delay @%d %v", i, d)
now = now.Add(time.Second)
}
}
// Test that after a phase of errors, we eventually stop applying a delay once there are
// no more errors.
func TestBackoffRecovers(t *testing.T) {
b := &Backoff{}
b.init(decayIntervalSeconds, decayFraction, maxDelay)
now := time.Now()
// Phase of all-errors
for i := 0; i < 100; i++ {
_ = b.ComputeDelayForRequest(now)
b.ReportError()
now = now.Add(time.Second)
}
for i := 0; i < 10; i++ {
d := b.ComputeDelayForRequest(now)
b.ReportError()
if d.Seconds() < 5 {
t.Fatalf("unexpected short-delay during all-error phase: %v", d)
}
t.Logf("error phase delay @%d %v", i, d)
now = now.Add(time.Second)
}
// Phase of no errors
for i := 0; i < 100; i++ {
_ = b.ComputeDelayForRequest(now)
now = now.Add(3 * time.Second)
}
for i := 0; i < 10; i++ {
d := b.ComputeDelayForRequest(now)
if d.Seconds() != 0 {
t.Fatalf("unexpected delay during error recovery phase: %v", d)
}
t.Logf("no-error phase delay @%d %v", i, d)
now = now.Add(time.Second)
}
}


@@ -0,0 +1,146 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
)
type IPPermissionSet map[string]*ec2.IpPermission
func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet {
s := make(IPPermissionSet)
s.Insert(items...)
return s
}
// Ungroup splits permissions out into individual permissions.
// EC2 will combine permissions with the same port but different SourceRanges together, for example;
// we ungroup them so we can process them individually.
func (s IPPermissionSet) Ungroup() IPPermissionSet {
l := []*ec2.IpPermission{}
for _, p := range s.List() {
if len(p.IpRanges) <= 1 {
l = append(l, p)
continue
}
for _, ipRange := range p.IpRanges {
c := &ec2.IpPermission{}
*c = *p
c.IpRanges = []*ec2.IpRange{ipRange}
l = append(l, c)
}
}
l2 := []*ec2.IpPermission{}
for _, p := range l {
if len(p.UserIdGroupPairs) <= 1 {
l2 = append(l2, p)
continue
}
for _, u := range p.UserIdGroupPairs {
c := &ec2.IpPermission{}
*c = *p
c.UserIdGroupPairs = []*ec2.UserIdGroupPair{u}
l2 = append(l2, c)
}
}
l3 := []*ec2.IpPermission{}
for _, p := range l2 {
if len(p.PrefixListIds) <= 1 {
l3 = append(l3, p)
continue
}
for _, v := range p.PrefixListIds {
c := &ec2.IpPermission{}
*c = *p
c.PrefixListIds = []*ec2.PrefixListId{v}
l3 = append(l3, c)
}
}
return NewIPPermissionSet(l3...)
}
// Insert adds items to the set.
func (s IPPermissionSet) Insert(items ...*ec2.IpPermission) {
for _, p := range items {
k := keyForIPPermission(p)
s[k] = p
}
}
// List returns the contents as a slice. Order is not defined.
func (s IPPermissionSet) List() []*ec2.IpPermission {
res := make([]*ec2.IpPermission, 0, len(s))
for _, v := range s {
res = append(res, v)
}
return res
}
// IsSuperset returns true if and only if s1 is a superset of s2.
func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
for k := range s2 {
_, found := s1[k]
if !found {
return false
}
}
return true
}
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool {
return len(s1) == len(s2) && s1.IsSuperset(s2)
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s1.Difference(s2) = {a3}
// s2.Difference(s1) = {a4, a5}
func (s IPPermissionSet) Difference(s2 IPPermissionSet) IPPermissionSet {
result := NewIPPermissionSet()
for k, v := range s {
_, found := s2[k]
if !found {
result[k] = v
}
}
return result
}
// Len returns the size of the set.
func (s IPPermissionSet) Len() int {
return len(s)
}
func keyForIPPermission(p *ec2.IpPermission) string {
v, err := json.Marshal(p)
if err != nil {
panic(fmt.Sprintf("error building JSON representation of ec2.IpPermission: %v", err))
}
return string(v)
}
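To make Ungroup concrete with made-up values: a permission that lists two CIDR ranges splits into two single-range permissions, so later diffing can add or revoke each rule independently.

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// ungroupExample returns a set containing two permissions, one per CIDR range.
func ungroupExample() IPPermissionSet {
	grouped := &ec2.IpPermission{
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(80),
		ToPort:     aws.Int64(80),
		IpRanges: []*ec2.IpRange{
			{CidrIp: aws.String("10.0.0.0/8")},
			{CidrIp: aws.String("192.168.0.0/16")},
		},
	}
	return NewIPPermissionSet(grouped).Ungroup()
}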


@@ -0,0 +1,84 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"fmt"
"net/url"
"strings"
"github.com/aws/aws-sdk-go/aws"
)
// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678a
// The "traditional" format is "vol-12345678"
// A new longer format is also being introduced: "vol-12345678abcdef01"
// We should not assume anything about the length or format, though it seems
// reasonable to assume that volumes will continue to start with "vol-".
type awsVolumeID string
func (i awsVolumeID) awsString() *string {
return aws.String(string(i))
}
// KubernetesVolumeID represents the id for a volume in the kubernetes API;
// a few forms are recognized:
// * aws://<zone>/<awsVolumeId>
// * aws:///<awsVolumeId>
// * <awsVolumeId>
type KubernetesVolumeID string
// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
// name looks like aws://availability-zone/awsVolumeId
// The original idea of the URL-style name was to put the AZ into the
// host, so we could find the AZ immediately from the name without
// querying the API. But it turns out we don't actually need it for
// multi-AZ clusters, as we put the AZ into the labels on the PV instead.
// However, if in future we want to support multi-AZ cluster
// volume-awareness without using PersistentVolumes, we likely will
// want the AZ in the host.
s := string(name)
if !strings.HasPrefix(s, "aws://") {
// Assume a bare aws volume id (vol-1234...)
// Build a URL with an empty host (AZ)
s = "aws://" + "" + "/" + s
}
url, err := url.Parse(s)
if err != nil {
// TODO: Maybe we should pass a URL into the Volume functions
return "", fmt.Errorf("Invalid disk name (%s): %v", name, err)
}
if url.Scheme != "aws" {
return "", fmt.Errorf("Invalid scheme for AWS volume (%s)", name)
}
awsID := url.Path
awsID = strings.Trim(awsID, "/")
// We sanity check the resulting volume; the two known formats are
// vol-12345678 and vol-12345678abcdef01
// TODO: Regex match?
if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") {
return "", fmt.Errorf("Invalid format for AWS volume (%s)", name)
}
return awsVolumeID(awsID), nil
}