Add glide.yaml and vendor deps
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD (new file, 55 lines, generated, vendored)
@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "loadbalancer.go",
        "port_allocator.go",
        "proxier.go",
        "proxysocket.go",
        "roundrobin.go",
        "udp_server.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/proxy:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/errors:go_default_library",
        "//pkg/util/net:go_default_library",
        "//pkg/util/netsh:go_default_library",
        "//pkg/util/runtime:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//pkg/util/wait:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "port_allocator_test.go",
        "proxier_test.go",
        "roundrobin_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/proxy:go_default_library",
        "//pkg/types:go_default_library",
        "//pkg/util/net:go_default_library",
        "//pkg/util/netsh/testing:go_default_library",
        "//pkg/util/runtime:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go (new file, 34 lines, generated, vendored)
@@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "net"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/proxy"
)

// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
    // NextEndpoint returns the endpoint to handle a request for the given
    // service-port and source address.
    NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
    NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
    DeleteService(service proxy.ServicePortName)
    CleanupStaleStickySessions(service proxy.ServicePortName)
}
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/port_allocator.go (new file, 153 lines, generated, vendored)
@@ -0,0 +1,153 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "errors"
    "math/big"
    "math/rand"
    "sync"
    "time"

    "k8s.io/kubernetes/pkg/util/net"
    "k8s.io/kubernetes/pkg/util/wait"
)

var (
    errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
)

type PortAllocator interface {
    AllocateNext() (int, error)
    Release(int)
}

// randomAllocator is a PortAllocator implementation that allocates random ports, yielding
// a port value of 0 for every call to AllocateNext().
type randomAllocator struct{}

// AllocateNext always returns 0
func (r *randomAllocator) AllocateNext() (int, error) {
    return 0, nil
}

// Release is a noop
func (r *randomAllocator) Release(_ int) {
    // noop
}

// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty
// then a random port allocator is returned; otherwise, a new range-based allocator
// is returned.
func newPortAllocator(r net.PortRange) PortAllocator {
    if r.Base == 0 {
        return &randomAllocator{}
    }
    return newPortRangeAllocator(r)
}

const (
    portsBufSize         = 16
    nextFreePortCooldown = 500 * time.Millisecond
    allocateNextTimeout  = 1 * time.Second
)

type rangeAllocator struct {
    net.PortRange
    ports chan int
    used  big.Int
    lock  sync.Mutex
    rand  *rand.Rand
}

func newPortRangeAllocator(r net.PortRange) PortAllocator {
    if r.Base == 0 || r.Size == 0 {
        panic("illegal argument: may not specify an empty port range")
    }
    ra := &rangeAllocator{
        PortRange: r,
        ports:     make(chan int, portsBufSize),
        rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
    }
    go wait.Until(func() { ra.fillPorts(wait.NeverStop) }, nextFreePortCooldown, wait.NeverStop)
    return ra
}

// fillPorts loops, always searching for the next free port and, if found, fills the ports buffer with it.
// this func blocks until either there are no remaining free ports, or else the stopCh chan is closed.
func (r *rangeAllocator) fillPorts(stopCh <-chan struct{}) {
    for {
        port := r.nextFreePort()
        if port == -1 {
            return
        }
        select {
        case <-stopCh:
            return
        case r.ports <- port:
        }
    }
}

// nextFreePort finds a free port, first picking a random port. if that port is already in use
// then the port range is scanned sequentially until either a port is found or the scan completes
// unsuccessfully. an unsuccessful scan returns a port of -1.
func (r *rangeAllocator) nextFreePort() int {
    r.lock.Lock()
    defer r.lock.Unlock()

    // choose random port
    j := r.rand.Intn(r.Size)
    if b := r.used.Bit(j); b == 0 {
        r.used.SetBit(&r.used, j, 1)
        return j + r.Base
    }

    // search sequentially
    for i := j + 1; i < r.Size; i++ {
        if b := r.used.Bit(i); b == 0 {
            r.used.SetBit(&r.used, i, 1)
            return i + r.Base
        }
    }
    for i := 0; i < j; i++ {
        if b := r.used.Bit(i); b == 0 {
            r.used.SetBit(&r.used, i, 1)
            return i + r.Base
        }
    }
    return -1
}

func (r *rangeAllocator) AllocateNext() (port int, err error) {
    select {
    case port = <-r.ports:
    case <-time.After(allocateNextTimeout):
        err = errPortRangeNoPortsRemaining
    }
    return
}

func (r *rangeAllocator) Release(port int) {
    port -= r.Base
    if port < 0 || port >= r.Size {
        return
    }
    r.lock.Lock()
    defer r.lock.Unlock()
    r.used.SetBit(&r.used, port, 0)
}
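As an aside, here is a minimal sketch of the allocate/release cycle of the range-based allocator above. It is written as if it lived inside package winuserspace (newPortAllocator is unexported); the helper name examplePortUse is hypothetical and not part of this commit, while PortRange.Set is the same setter the tests below rely on.

package winuserspace

import (
    "fmt"

    "k8s.io/kubernetes/pkg/util/net"
)

// examplePortUse is a hypothetical helper showing how a proxier-style caller
// would obtain and later return a host port from a fixed range.
func examplePortUse() error {
    var r net.PortRange
    r.Set("36000-36099") // non-zero Base selects the range-based allocator

    alloc := newPortAllocator(r)

    port, err := alloc.AllocateNext() // waits up to allocateNextTimeout (1s) for a free port
    if err != nil {
        return err // errPortRangeNoPortsRemaining once the range is exhausted
    }
    defer alloc.Release(port) // hand the port back so it can be reallocated

    fmt.Printf("allocated host port %d\n", port)
    return nil
}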
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/port_allocator_test.go (new file, 101 lines, generated, vendored)
@@ -0,0 +1,101 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "reflect"
    "testing"

    "k8s.io/kubernetes/pkg/util/net"
)

func TestRangeAllocatorEmpty(t *testing.T) {
    r := &net.PortRange{}
    r.Set("0-0")
    defer func() {
        if rv := recover(); rv == nil {
            t.Fatalf("expected panic because of empty port range: %#v", r)
        }
    }()
    _ = newPortRangeAllocator(*r)
}

func TestRangeAllocatorFullyAllocated(t *testing.T) {
    r := &net.PortRange{}
    r.Set("1-1")
    a := newPortRangeAllocator(*r)
    p, err := a.AllocateNext()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if p != 1 {
        t.Fatalf("unexpected allocated port: %d", p)
    }

    _, err = a.AllocateNext()
    if err == nil {
        t.Fatalf("expected error because of fully-allocated range")
    }

    a.Release(p)
    p, err = a.AllocateNext()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if p != 1 {
        t.Fatalf("unexpected allocated port: %d", p)
    }

    _, err = a.AllocateNext()
    if err == nil {
        t.Fatalf("expected error because of fully-allocated range")
    }
}

func TestRangeAllocator_RandomishAllocation(t *testing.T) {
    r := &net.PortRange{}
    r.Set("1-100")
    a := newPortRangeAllocator(*r)

    // allocate all the ports
    var err error
    ports := make([]int, 100, 100)
    for i := 0; i < 100; i++ {
        ports[i], err = a.AllocateNext()
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
    }

    // release them all
    for i := 0; i < 100; i++ {
        a.Release(ports[i])
    }

    // allocate the ports again
    rports := make([]int, 100, 100)
    for i := 0; i < 100; i++ {
        rports[i], err = a.AllocateNext()
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
    }

    if reflect.DeepEqual(ports, rports) {
        t.Fatalf("expected re-allocated ports to be in a somewhat random order")
    }
}
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go (new file, 682 lines, generated, vendored)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier_test.go (new file, 855 lines, generated, vendored)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go (new file, 300 lines, generated, vendored)
@@ -0,0 +1,300 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "fmt"
    "io"
    "net"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/proxy"
    "k8s.io/kubernetes/pkg/util/runtime"
)

// Abstraction over TCP/UDP sockets which are proxied.
type proxySocket interface {
    // Addr gets the net.Addr for a proxySocket.
    Addr() net.Addr
    // Close stops the proxySocket from accepting incoming connections.
    // Each implementation should comment on the impact of calling Close
    // while sessions are active.
    Close() error
    // ProxyLoop proxies incoming connections for the specified service to the service endpoints.
    ProxyLoop(service proxy.ServicePortName, info *serviceInfo, proxier *Proxier)
    // ListenPort returns the host port that the proxySocket is listening on
    ListenPort() int
}

func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
    host := ""
    if ip != nil {
        host = ip.String()
    }

    switch strings.ToUpper(string(protocol)) {
    case "TCP":
        listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
        if err != nil {
            return nil, err
        }
        return &tcpProxySocket{Listener: listener, port: port}, nil
    case "UDP":
        addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
        if err != nil {
            return nil, err
        }
        conn, err := net.ListenUDP("udp", addr)
        if err != nil {
            return nil, err
        }
        return &udpProxySocket{UDPConn: conn, port: port}, nil
    }
    return nil, fmt.Errorf("unknown protocol %q", protocol)
}

// How long we wait for a connection to a backend in seconds
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
    net.Listener
    port int
}

func (tcp *tcpProxySocket) ListenPort() int {
    return tcp.port
}

func tryConnect(service proxy.ServicePortName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
    sessionAffinityReset := false
    for _, dialTimeout := range endpointDialTimeout {
        endpoint, err := proxier.loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
        if err != nil {
            glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
            return nil, err
        }
        glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
        // TODO: This could spin up a new goroutine to make the outbound connection,
        // and keep accepting inbound traffic.
        outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
        if err != nil {
            if isTooManyFDsError(err) {
                panic("Dial failed: " + err.Error())
            }
            glog.Errorf("Dial failed: %v", err)
            sessionAffinityReset = true
            continue
        }
        return outConn, nil
    }
    return nil, fmt.Errorf("failed to connect to an endpoint.")
}

func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
    for {
        if !myInfo.isAlive() {
            // The service port was closed or replaced.
            return
        }
        // Block until a connection is made.
        inConn, err := tcp.Accept()
        if err != nil {
            if isTooManyFDsError(err) {
                panic("Accept failed: " + err.Error())
            }

            if isClosedError(err) {
                return
            }
            if !myInfo.isAlive() {
                // Then the service port was just closed so the accept failure is to be expected.
                return
            }
            glog.Errorf("Accept failed: %v", err)
            continue
        }
        glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
        outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
        if err != nil {
            glog.Errorf("Failed to connect to balancer: %v", err)
            inConn.Close()
            continue
        }
        // Spin up an async copy loop.
        go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
    }
}

// proxyTCP proxies data bi-directionally between in and out.
func proxyTCP(in, out *net.TCPConn) {
    var wg sync.WaitGroup
    wg.Add(2)
    glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
        in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
    go copyBytes("from backend", in, out, &wg)
    go copyBytes("to backend", out, in, &wg)
    wg.Wait()
}

func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
    defer wg.Done()
    glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
    n, err := io.Copy(dest, src)
    if err != nil {
        if !isClosedError(err) {
            glog.Errorf("I/O error: %v", err)
        }
    }
    glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
    dest.Close()
    src.Close()
}

// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
    *net.UDPConn
    port int
}

func (udp *udpProxySocket) ListenPort() int {
    return udp.port
}

func (udp *udpProxySocket) Addr() net.Addr {
    return udp.LocalAddr()
}

// Holds all the known UDP clients that have not timed out.
type clientCache struct {
    mu      sync.Mutex
    clients map[string]net.Conn // addr string -> connection
}

func newClientCache() *clientCache {
    return &clientCache{clients: map[string]net.Conn{}}
}

func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
    var buffer [4096]byte // 4KiB should be enough for most whole-packets
    for {
        if !myInfo.isAlive() {
            // The service port was closed or replaced.
            break
        }

        // Block until data arrives.
        // TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
        n, cliAddr, err := udp.ReadFrom(buffer[0:])
        if err != nil {
            if e, ok := err.(net.Error); ok {
                if e.Temporary() {
                    glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
                    continue
                }
            }
            glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
            break
        }
        // If this is a client we know already, reuse the connection and goroutine.
        svrConn, err := udp.getBackendConn(myInfo.activeClients, cliAddr, proxier, service, myInfo.timeout)
        if err != nil {
            continue
        }
        // TODO: It would be nice to let the goroutine handle this write, but we don't
        // really want to copy the buffer. We could do a pool of buffers or something.
        _, err = svrConn.Write(buffer[0:n])
        if err != nil {
            if !logTimeout(err) {
                glog.Errorf("Write failed: %v", err)
                // TODO: Maybe tear down the goroutine for this client/server pair?
            }
            continue
        }
        err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
        if err != nil {
            glog.Errorf("SetDeadline failed: %v", err)
            continue
        }
    }
}

func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
    activeClients.mu.Lock()
    defer activeClients.mu.Unlock()

    svrConn, found := activeClients.clients[cliAddr.String()]
    if !found {
        // TODO: This could spin up a new goroutine to make the outbound connection,
        // and keep accepting inbound traffic.
        glog.V(3).Infof("New UDP connection from %s", cliAddr)
        var err error
        svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
        if err != nil {
            return nil, err
        }
        if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
            glog.Errorf("SetDeadline failed: %v", err)
            return nil, err
        }
        activeClients.clients[cliAddr.String()] = svrConn
        go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
            defer runtime.HandleCrash()
            udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
        }(cliAddr, svrConn, activeClients, timeout)
    }
    return svrConn, nil
}

// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
    defer svrConn.Close()
    var buffer [4096]byte
    for {
        n, err := svrConn.Read(buffer[0:])
        if err != nil {
            if !logTimeout(err) {
                glog.Errorf("Read failed: %v", err)
            }
            break
        }
        err = svrConn.SetDeadline(time.Now().Add(timeout))
        if err != nil {
            glog.Errorf("SetDeadline failed: %v", err)
            break
        }
        n, err = udp.WriteTo(buffer[0:n], cliAddr)
        if err != nil {
            if !logTimeout(err) {
                glog.Errorf("WriteTo failed: %v", err)
            }
            break
        }
    }
    activeClients.mu.Lock()
    delete(activeClients.clients, cliAddr.String())
    activeClients.mu.Unlock()
}
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go (new file, 326 lines, generated, vendored)
@@ -0,0 +1,326 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "errors"
    "fmt"
    "net"
    "reflect"
    "strconv"
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/proxy"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/slice"
)

var (
    ErrMissingServiceEntry = errors.New("missing service entry")
    ErrMissingEndpoints    = errors.New("missing endpoints")
)

type affinityState struct {
    clientIP string
    //clientProtocol api.Protocol //not yet used
    //sessionCookie string //not yet used
    endpoint string
    lastUsed time.Time
}

type affinityPolicy struct {
    affinityType api.ServiceAffinity
    affinityMap  map[string]*affinityState // map client IP -> affinity info
    ttlMinutes   int
}

// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
    lock     sync.RWMutex
    services map[proxy.ServicePortName]*balancerState
}

// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}

type balancerState struct {
    endpoints []string // a list of "ip:port" style strings
    index     int      // current index into endpoints
    affinity  affinityPolicy
}

func newAffinityPolicy(affinityType api.ServiceAffinity, ttlMinutes int) *affinityPolicy {
    return &affinityPolicy{
        affinityType: affinityType,
        affinityMap:  make(map[string]*affinityState),
        ttlMinutes:   ttlMinutes,
    }
}

// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
    return &LoadBalancerRR{
        services: map[proxy.ServicePortName]*balancerState{},
    }
}

func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) error {
    glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
    lb.lock.Lock()
    defer lb.lock.Unlock()
    lb.newServiceInternal(svcPort, affinityType, ttlMinutes)
    return nil
}

// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) *balancerState {
    if ttlMinutes == 0 {
        ttlMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimited instead????
    }

    if _, exists := lb.services[svcPort]; !exists {
        lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlMinutes)}
        glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
    } else if affinityType != "" {
        lb.services[svcPort].affinity.affinityType = affinityType
    }
    return lb.services[svcPort]
}

func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
    glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
    lb.lock.Lock()
    defer lb.lock.Unlock()
    delete(lb.services, svcPort)
}

// return true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
    // Should never be empty string, but checking for it to be safe.
    if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
        return false
    }
    return true
}

// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
    // Coarse locking is simple. We can get more fine-grained if/when we
    // can prove it matters.
    lb.lock.Lock()
    defer lb.lock.Unlock()

    state, exists := lb.services[svcPort]
    if !exists || state == nil {
        return "", ErrMissingServiceEntry
    }
    if len(state.endpoints) == 0 {
        return "", ErrMissingEndpoints
    }
    glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)

    sessionAffinityEnabled := isSessionAffinity(&state.affinity)

    var ipaddr string
    if sessionAffinityEnabled {
        // Caution: don't shadow ipaddr
        var err error
        ipaddr, _, err = net.SplitHostPort(srcAddr.String())
        if err != nil {
            return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
        }
        if !sessionAffinityReset {
            sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
            if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Minutes()) < state.affinity.ttlMinutes {
                // Affinity wins.
                endpoint := sessionAffinity.endpoint
                sessionAffinity.lastUsed = time.Now()
                glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
                return endpoint, nil
            }
        }
    }
    // Take the next endpoint.
    endpoint := state.endpoints[state.index]
    state.index = (state.index + 1) % len(state.endpoints)

    if sessionAffinityEnabled {
        var affinity *affinityState
        affinity = state.affinity.affinityMap[ipaddr]
        if affinity == nil {
            affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
            state.affinity.affinityMap[ipaddr] = affinity
        }
        affinity.lastUsed = time.Now()
        affinity.endpoint = endpoint
        affinity.clientIP = ipaddr
        glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
    }

    return endpoint, nil
}

type hostPortPair struct {
    host string
    port int
}

func isValidEndpoint(hpp *hostPortPair) bool {
    return hpp.host != "" && hpp.port > 0
}

func flattenValidEndpoints(endpoints []hostPortPair) []string {
    // Convert Endpoint objects into strings for easier use later. Ignore
    // the protocol field - we'll get that from the Service objects.
    var result []string
    for i := range endpoints {
        hpp := &endpoints[i]
        if isValidEndpoint(hpp) {
            result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
        }
    }
    return result
}

// Remove any session affinity records associated to a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
    for _, affinity := range state.affinity.affinityMap {
        if affinity.endpoint == endpoint {
            glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
            delete(state.affinity.affinityMap, affinity.clientIP)
        }
    }
}

// Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
// Then remove any session affinity records that are not in both lists.
// This assumes the lb.lock is held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
    allEndpoints := map[string]int{}
    for _, newEndpoint := range newEndpoints {
        allEndpoints[newEndpoint] = 1
    }
    state, exists := lb.services[svcPort]
    if !exists {
        return
    }
    for _, existingEndpoint := range state.endpoints {
        allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
    }
    for mKey, mVal := range allEndpoints {
        if mVal == 1 {
            glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
            removeSessionAffinityByEndpoint(state, svcPort, mKey)
        }
    }
}

// OnEndpointsUpdate manages the registered service endpoints.
// Registered endpoints are updated if found in the update set or
// unregistered if missing from the update set.
func (lb *LoadBalancerRR) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
    registeredEndpoints := make(map[proxy.ServicePortName]bool)
    lb.lock.Lock()
    defer lb.lock.Unlock()

    // Update endpoints for services.
    for i := range allEndpoints {
        svcEndpoints := &allEndpoints[i]

        // We need to build a map of portname -> all ip:ports for that
        // portname. Explode Endpoints.Subsets[*] into this structure.
        portsToEndpoints := map[string][]hostPortPair{}
        for i := range svcEndpoints.Subsets {
            ss := &svcEndpoints.Subsets[i]
            for i := range ss.Ports {
                port := &ss.Ports[i]
                for i := range ss.Addresses {
                    addr := &ss.Addresses[i]
                    portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
                    // Ignore the protocol field - we'll get that from the Service objects.
                }
            }
        }

        for portname := range portsToEndpoints {
            svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: svcEndpoints.Namespace, Name: svcEndpoints.Name}, Port: portname}
            state, exists := lb.services[svcPort]
            curEndpoints := []string{}
            if state != nil {
                curEndpoints = state.endpoints
            }
            newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])

            if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
                glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
                lb.updateAffinityMap(svcPort, newEndpoints)
                // OnEndpointsUpdate can be called without NewService being called externally.
                // To be safe we will call it here. A new service will only be created
                // if one does not already exist. The affinity will be updated
                // later, once NewService is called.
                state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
                state.endpoints = slice.ShuffleStrings(newEndpoints)

                // Reset the round-robin index.
                state.index = 0
            }
            registeredEndpoints[svcPort] = true
        }
    }
    // Remove endpoints missing from the update.
    for k := range lb.services {
        if _, exists := registeredEndpoints[k]; !exists {
            glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", k)
            // Reset but don't delete.
            state := lb.services[k]
            state.endpoints = []string{}
            state.index = 0
            state.affinity.affinityMap = map[string]*affinityState{}
        }
    }
}

// Tests whether two slices are equivalent. This sorts both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
    if len(lhs) != len(rhs) {
        return false
    }
    if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) {
        return true
    }
    return false
}

func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
    lb.lock.Lock()
    defer lb.lock.Unlock()

    state, exists := lb.services[svcPort]
    if !exists {
        return
    }
    for ip, affinity := range state.affinity.affinityMap {
        if int(time.Now().Sub(affinity.lastUsed).Minutes()) >= state.affinity.ttlMinutes {
            glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
            delete(state.affinity.affinityMap, ip)
        }
    }
}
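For orientation, a rough sketch of how the round-robin balancer above is driven: register the service port, feed in an Endpoints object, then pick endpoints. The ServicePortName and NamespacedName literals mirror the code above; the api struct literal names (EndpointSubset, EndpointAddress, EndpointPort) are assumptions inferred from the fields OnEndpointsUpdate reads, so treat this as illustrative only, not part of the commit.

package main

import (
    "fmt"
    "net"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/proxy"
    "k8s.io/kubernetes/pkg/proxy/winuserspace"
    "k8s.io/kubernetes/pkg/types"
)

func main() {
    lb := winuserspace.NewLoadBalancerRR()

    svc := proxy.ServicePortName{
        NamespacedName: types.NamespacedName{Namespace: "default", Name: "echo"},
        Port:           "http",
    }
    // Register the service; ServiceAffinityNone disables sticky sessions.
    lb.NewService(svc, api.ServiceAffinityNone, 0)

    // Hand the balancer one Endpoints object. The Subsets/Addresses/Ports type
    // names are assumed from the fields OnEndpointsUpdate iterates over.
    ep := api.Endpoints{}
    ep.Namespace = "default"
    ep.Name = "echo"
    ep.Subsets = []api.EndpointSubset{{
        Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
        Ports:     []api.EndpointPort{{Name: "http", Port: 8080}},
    }}
    lb.OnEndpointsUpdate([]api.Endpoints{ep})

    src := &net.TCPAddr{IP: net.ParseIP("192.0.2.10"), Port: 50000}
    for i := 0; i < 3; i++ {
        endpoint, err := lb.NextEndpoint(svc, src, false)
        if err != nil {
            fmt.Println("no endpoint:", err) // ErrMissingServiceEntry or ErrMissingEndpoints
            return
        }
        fmt.Println("round-robin pick:", endpoint) // alternates between the two ip:port strings
    }
}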
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin_test.go (new file, 727 lines, generated, vendored)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/udp_server.go (new file, 47 lines, generated, vendored)
@@ -0,0 +1,47 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "fmt"
    "net"
)

// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
type udpEchoServer struct {
    net.PacketConn
}

func (r *udpEchoServer) Loop() {
    var buffer [4096]byte
    for {
        n, cliAddr, err := r.ReadFrom(buffer[0:])
        if err != nil {
            fmt.Printf("ReadFrom failed: %v\n", err)
            continue
        }
        r.WriteTo(buffer[0:n], cliAddr)
    }
}

func newUDPEchoServer() (*udpEchoServer, error) {
    packetconn, err := net.ListenPacket("udp", ":0")
    if err != nil {
        return nil, err
    }
    return &udpEchoServer{packetconn}, nil
}
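A minimal sketch of exercising this echo helper: start the server on a random port, send one datagram, and read the echoed reply. It is written as if it lived inside package winuserspace (newUDPEchoServer is unexported), and the helper name exampleEcho is hypothetical, not part of this commit.

package winuserspace

import (
    "fmt"
    "net"
    "time"
)

// exampleEcho (hypothetical) round-trips one datagram through the echo server above.
func exampleEcho() error {
    srv, err := newUDPEchoServer()
    if err != nil {
        return err
    }
    go srv.Loop()

    // ListenPacket(":0") picked an ephemeral port; dial it on loopback.
    port := srv.LocalAddr().(*net.UDPAddr).Port
    conn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
    if err != nil {
        return err
    }
    defer conn.Close()

    if _, err := conn.Write([]byte("ping")); err != nil {
        return err
    }
    conn.SetReadDeadline(time.Now().Add(time.Second)) // don't hang if the echo is lost
    buf := make([]byte, 4)
    if _, err := conn.Read(buf); err != nil {
        return err
    }
    fmt.Printf("echoed back: %s\n", buf)
    return nil
}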