Add glide.yaml and vendor deps
This commit is contained in:
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
65  vendor/k8s.io/kubernetes/pkg/storage/etcd3/BUILD  generated  vendored  Normal file
@@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
    "go_test",
    "cgo_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "compact.go",
        "event.go",
        "store.go",
        "watcher.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/meta:go_default_library",
        "//pkg/apis/meta/v1:go_default_library",
        "//pkg/conversion:go_default_library",
        "//pkg/runtime:go_default_library",
        "//pkg/storage:go_default_library",
        "//pkg/storage/etcd:go_default_library",
        "//pkg/util:go_default_library",
        "//pkg/watch:go_default_library",
        "//vendor:github.com/coreos/etcd/clientv3",
        "//vendor:github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
        "//vendor:github.com/coreos/etcd/mvcc/mvccpb",
        "//vendor:github.com/golang/glog",
        "//vendor:golang.org/x/net/context",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "compact_test.go",
        "store_test.go",
        "watcher_test.go",
    ],
    library = "go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/meta/v1:go_default_library",
        "//pkg/fields:go_default_library",
        "//pkg/labels:go_default_library",
        "//pkg/runtime:go_default_library",
        "//pkg/runtime/schema:go_default_library",
        "//pkg/storage:go_default_library",
        "//pkg/util/wait:go_default_library",
        "//pkg/watch:go_default_library",
        "//vendor:github.com/coreos/etcd/clientv3",
        "//vendor:github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
        "//vendor:github.com/coreos/etcd/integration",
        "//vendor:golang.org/x/net/context",
    ],
)
161  vendor/k8s.io/kubernetes/pkg/storage/etcd3/compact.go  generated  vendored  Normal file
@@ -0,0 +1,161 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd3

import (
	"strconv"
	"sync"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/golang/glog"
	"golang.org/x/net/context"
)

const (
	compactInterval = 5 * time.Minute
	compactRevKey   = "compact_rev_key"
)

var (
	endpointsMapMu sync.Mutex
	endpointsMap   map[string]struct{}
)

func init() {
	endpointsMap = make(map[string]struct{})
}

// StartCompactor starts a compactor in the background to compact old versions of keys that are no longer needed.
// By default, we save the most recent 10 minutes of data and compact versions more than 10 minutes old.
// That should be enough for slow watchers and to tolerate bursts.
// TODO: We might keep a longer history (12h) in the future once the storage API can take advantage of past versions of keys.
func StartCompactor(ctx context.Context, client *clientv3.Client) {
	endpointsMapMu.Lock()
	defer endpointsMapMu.Unlock()

	// In one process, we can have only one compactor for one cluster.
	// Currently we rely on endpoints to differentiate clusters.
	for _, ep := range client.Endpoints() {
		if _, ok := endpointsMap[ep]; ok {
			glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
			return
		}
	}
	for _, ep := range client.Endpoints() {
		endpointsMap[ep] = struct{}{}
	}

	go compactor(ctx, client, compactInterval)
}

// compactor periodically compacts historical versions of keys in etcd.
// It will compact keys with versions older than the given interval.
// In other words, after compaction, etcd will only contain keys set during the last interval.
// Any API call for older versions of keys will return an error.
// Interval is the time interval between each compaction. The first compaction happens after "interval".
func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) {
	// Technical definitions:
	// We have a special key in etcd defined as *compactRevKey*.
	// compactRevKey's value will be set to the string of the last compacted revision.
	// compactRevKey's version will be used as logical time for comparison. The version is referred to as the compact time.
	// Initially, because the key doesn't exist, the compact time (version) is 0.
	//
	// Algorithm:
	// - Compare to see if (local compact_time) = (remote compact_time).
	// - If yes, increment both local and remote compact_time, and do a compaction.
	// - If not, set local to remote compact_time.
	//
	// Technical details/insights:
	//
	// The protocol here is lease based. If one compactor CASes successfully, the others will know it when they fail the
	// CAS later and will try again in 10 minutes. If an APIServer crashed, another one would "take over" the lease.
	//
	// For example, in the following diagram, we have a compactor C1 doing compaction in t1, t2. Another compactor C2
	// at t1' (t1 < t1' < t2) would CAS fail, set its known oldRev to rev at t1', and try again in t2' (t2' > t2).
	// If C1 crashed and wouldn't compact at t2, C2 would CAS successfully at t2'.
	//
	//                oldRev(t2)  curRev(t2)
	//                                  +
	//   oldRev        curRev           |
	//     +             +              |
	//     |             |              |
	//     |             |    t1'       |      t2'
	// +---v-------------v----^---------v------^---->
	//     t0            t1             t2
	//
	// We have the guarantees:
	// - in normal cases, the interval is 10 minutes.
	// - in failover, the interval is >10m and <20m
	//
	// FAQ:
	// - What if time is not accurate? We don't care as long as someone did the compaction. Atomicity is ensured using
	//   etcd API.
	// - What happens under heavy load scenarios? Initially, each apiserver will do only one compaction
	//   every 10 minutes. This is very unlikely to affect, or be affected by, server load.

	var compactTime int64
	var rev int64
	var err error
	for {
		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return
		}

		compactTime, rev, err = compact(ctx, client, compactTime, rev)
		if err != nil {
			glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err)
			continue
		}
	}
}

// compact compacts etcd store and returns current rev.
// It will return the current compact time and global revision if no error occurred.
// Note that CAS fail will not incur any error.
func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, int64, error) {
	resp, err := client.KV.Txn(ctx).If(
		clientv3.Compare(clientv3.Version(compactRevKey), "=", t),
	).Then(
		clientv3.OpPut(compactRevKey, strconv.FormatInt(rev, 10)), // Expect side effect: increment Version
	).Else(
		clientv3.OpGet(compactRevKey),
	).Commit()
	if err != nil {
		return t, rev, err
	}

	curRev := resp.Header.Revision

	if !resp.Succeeded {
		curTime := resp.Responses[0].GetResponseRange().Kvs[0].Version
		return curTime, curRev, nil
	}
	curTime := t + 1

	if rev == 0 {
		// We don't compact on bootstrap.
		return curTime, curRev, nil
	}
	if _, err = client.Compact(ctx, rev); err != nil {
		return curTime, curRev, err
	}
	glog.Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints())
	return curTime, curRev, nil
}
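The compactor above is started at most once per process for a given set of endpoints. As a hedged illustration that is not part of this commit, a caller could wire it up against an etcd v3 client roughly as follows; the endpoint address and timeout are placeholder values.

package main

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"

	"k8s.io/kubernetes/pkg/storage/etcd3"
)

func main() {
	// Placeholder endpoint; in a real apiserver this comes from configuration.
	client, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// StartCompactor is a no-op if a compactor already exists for these
	// endpoints in this process; the background goroutine stops when ctx
	// is cancelled.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	etcd3.StartCompactor(ctx, client)

	// ... construct the etcd3 store with the same client and serve requests ...
	select {} // block so the background compactor keeps running (illustration only)
}

Because the compaction transaction is guarded by a compare on compactRevKey's version, running this from several apiservers against the same cluster means only one of them performs the compaction in each interval.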
87  vendor/k8s.io/kubernetes/pkg/storage/etcd3/compact_test.go  generated  vendored  Normal file
@@ -0,0 +1,87 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd3

import (
	"testing"

	"github.com/coreos/etcd/clientv3"
	etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"golang.org/x/net/context"
)

func TestCompact(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	putResp1, err := client.Put(ctx, "/somekey", "data2")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	_, _, err = compact(ctx, client, 0, putResp1.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}

	obj, err := client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision))
	if err != etcdrpc.ErrCompacted {
		t.Errorf("Expecting ErrCompacted, but get=%v err=%v", obj, err)
	}
}

// TestCompactConflict tests two compactors (call them C1, C2) trying to compact the etcd cluster at the same
// logical time.
// - C1 compacts first. It will succeed.
// - C2 compacts after. It will fail, but it will get the latest logical time, which should be larger by one.
func TestCompactConflict(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	// Compact first. It would do the compaction and return compact time which is incremented by 1.
	curTime, _, err := compact(ctx, client, 0, putResp.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}
	if curTime != 1 {
		t.Errorf("Expect current logical time = 1, get = %v", curTime)
	}

	// Compact again with the same parameters. It won't do compaction but return the latest compact time.
	curTime2, _, err := compact(ctx, client, 0, putResp.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}
	if curTime != curTime2 {
		t.Errorf("Unexpected curTime (%v) != curTime2 (%v)", curTime, curTime2)
	}
}
57  vendor/k8s.io/kubernetes/pkg/storage/etcd3/event.go  generated  vendored  Normal file
@@ -0,0 +1,57 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd3

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

type event struct {
	key       string
	value     []byte
	prevValue []byte
	rev       int64
	isDeleted bool
	isCreated bool
}

// parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event.
func parseKV(kv *mvccpb.KeyValue) *event {
	return &event{
		key:       string(kv.Key),
		value:     kv.Value,
		prevValue: nil,
		rev:       kv.ModRevision,
		isDeleted: false,
		isCreated: true,
	}
}

func parseEvent(e *clientv3.Event) *event {
	ret := &event{
		key:       string(e.Kv.Key),
		value:     e.Kv.Value,
		rev:       e.Kv.ModRevision,
		isDeleted: e.Type == clientv3.EventTypeDelete,
		isCreated: e.IsCreate(),
	}
	if e.PrevKv != nil {
		ret.prevValue = e.PrevKv.Value
	}
	return ret
}
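For reference, the mapping parseEvent performs could be exercised from a small test inside this package. The sketch below is not part of the commit; the test name, key, and revision numbers are made up.

package etcd3

import (
	"testing"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

// A PUT whose CreateRevision differs from its ModRevision is an update of an
// existing key, so parseEvent should report isCreated=false, carry the new
// value at the mod revision, and surface the previous value from PrevKv.
func TestParseEventUpdate(t *testing.T) {
	e := &clientv3.Event{
		Type: clientv3.EventTypePut,
		Kv: &mvccpb.KeyValue{
			Key:            []byte("/somekey"),
			Value:          []byte("new"),
			CreateRevision: 5,
			ModRevision:    9,
		},
		PrevKv: &mvccpb.KeyValue{
			Key:         []byte("/somekey"),
			Value:       []byte("old"),
			ModRevision: 5,
		},
	}
	got := parseEvent(e)
	if got.isCreated || got.isDeleted || got.rev != 9 || string(got.prevValue) != "old" {
		t.Errorf("unexpected event: %+v", got)
	}
}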
520  vendor/k8s.io/kubernetes/pkg/storage/etcd3/store.go  generated  vendored  Normal file
File diff suppressed because it is too large
557  vendor/k8s.io/kubernetes/pkg/storage/etcd3/store_test.go  generated  vendored  Normal file
File diff suppressed because it is too large
375  vendor/k8s.io/kubernetes/pkg/storage/etcd3/watcher.go  generated  vendored  Normal file
@@ -0,0 +1,375 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd3

import (
	"fmt"
	"net/http"
	"strings"
	"sync"

	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/storage"
	"k8s.io/kubernetes/pkg/watch"

	"github.com/coreos/etcd/clientv3"
	etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/golang/glog"
	"golang.org/x/net/context"
)

const (
	// We set a buffer to reduce the number of context switches.
	incomingBufSize = 100
	outgoingBufSize = 100
)

type watcher struct {
	client    *clientv3.Client
	codec     runtime.Codec
	versioner storage.Versioner
}

// watchChan implements watch.Interface.
type watchChan struct {
	watcher           *watcher
	key               string
	initialRev        int64
	recursive         bool
	internalFilter    storage.FilterFunc
	ctx               context.Context
	cancel            context.CancelFunc
	incomingEventChan chan *event
	resultChan        chan watch.Event
	errChan           chan error
}

func newWatcher(client *clientv3.Client, codec runtime.Codec, versioner storage.Versioner) *watcher {
	return &watcher{
		client:    client,
		codec:     codec,
		versioner: versioner,
	}
}

// Watch watches on a key and returns a watch.Interface that transfers relevant notifications.
// If rev is zero, it will return the existing object(s) and then start watching from
// the maximum revision+1 of the returned objects.
// If rev is non-zero, it will watch events that happened after the given revision.
// If recursive is false, it watches on the given key.
// If recursive is true, it watches any children and directories under the key, excluding the root key itself.
// pred must be non-nil. Only changes that match pred will be returned.
func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive bool, pred storage.SelectionPredicate) (watch.Interface, error) {
	if recursive && !strings.HasSuffix(key, "/") {
		key += "/"
	}
	wc := w.createWatchChan(ctx, key, rev, recursive, pred)
	go wc.run()
	return wc, nil
}

func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive bool, pred storage.SelectionPredicate) *watchChan {
	wc := &watchChan{
		watcher:           w,
		key:               key,
		initialRev:        rev,
		recursive:         recursive,
		internalFilter:    storage.SimpleFilter(pred),
		incomingEventChan: make(chan *event, incomingBufSize),
		resultChan:        make(chan watch.Event, outgoingBufSize),
		errChan:           make(chan error, 1),
	}
	if pred.Label.Empty() && pred.Field.Empty() {
		// The filter doesn't filter out any object.
		wc.internalFilter = nil
	}
	wc.ctx, wc.cancel = context.WithCancel(ctx)
	return wc
}

func (wc *watchChan) run() {
	watchClosedCh := make(chan struct{})
	go wc.startWatching(watchClosedCh)

	var resultChanWG sync.WaitGroup
	resultChanWG.Add(1)
	go wc.processEvent(&resultChanWG)

	select {
	case err := <-wc.errChan:
		if err == context.Canceled {
			break
		}
		errResult := parseError(err)
		if errResult != nil {
			// error result is guaranteed to be received by user before closing ResultChan.
			select {
			case wc.resultChan <- *errResult:
			case <-wc.ctx.Done(): // user has given up all results
			}
		}
	case <-watchClosedCh:
	case <-wc.ctx.Done(): // user cancel
	}

	// We use wc.ctx to reap all goroutines. Under whatever condition, we should stop them all.
	// It's fine to double cancel.
	wc.cancel()

	// we need to wait until resultChan will no longer be used
	resultChanWG.Wait()
	close(wc.resultChan)
}

func (wc *watchChan) Stop() {
	wc.cancel()
}

func (wc *watchChan) ResultChan() <-chan watch.Event {
	return wc.resultChan
}

// sync tries to retrieve existing data and send it to process.
// The revision to watch will be set to the revision in the response.
// All events sent will have isCreated=true
func (wc *watchChan) sync() error {
	opts := []clientv3.OpOption{}
	if wc.recursive {
		opts = append(opts, clientv3.WithPrefix())
	}
	getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...)
	if err != nil {
		return err
	}
	wc.initialRev = getResp.Header.Revision
	for _, kv := range getResp.Kvs {
		wc.sendEvent(parseKV(kv))
	}
	return nil
}

// startWatching does:
// - get current objects if initialRev=0; set initialRev to current rev
// - watch on given key and send events to process.
func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
	if wc.initialRev == 0 {
		if err := wc.sync(); err != nil {
			glog.Errorf("failed to sync with latest state: %v", err)
			wc.sendError(err)
			return
		}
	}
	opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()}
	if wc.recursive {
		opts = append(opts, clientv3.WithPrefix())
	}
	wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
	for wres := range wch {
		if wres.Err() != nil {
			err := wres.Err()
			// If there is an error on the server (e.g. compaction), the channel will return it before being closed.
			glog.Errorf("watch chan error: %v", err)
			wc.sendError(err)
			return
		}
		for _, e := range wres.Events {
			wc.sendEvent(parseEvent(e))
		}
	}
	// When we come to this point, it's only possible that the client side ended the watch,
	// e.g. cancelled the context or closed the client.
	// If this watch chan is broken and the context isn't cancelled, other goroutines will still hang.
	// We should notify the main thread that this goroutine has exited.
	close(watchClosedCh)
}

// processEvent processes events from etcd watcher and sends results to resultChan.
func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
	defer wg.Done()

	for {
		select {
		case e := <-wc.incomingEventChan:
			res := wc.transform(e)
			if res == nil {
				continue
			}
			if len(wc.resultChan) == outgoingBufSize {
				glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
					"Probably caused by slow dispatching events to watchers", outgoingBufSize)
			}
			// If user couldn't receive results fast enough, we also block incoming events from watcher.
			// Because storing events locally will cause more memory usage.
			// The worst case would be closing the fast watcher.
			select {
			case wc.resultChan <- *res:
			case <-wc.ctx.Done():
				return
			}
		case <-wc.ctx.Done():
			return
		}
	}
}

func (wc *watchChan) filter(obj runtime.Object) bool {
	if wc.internalFilter == nil {
		return true
	}
	return wc.internalFilter(obj)
}

func (wc *watchChan) acceptAll() bool {
	return wc.internalFilter == nil
}

// transform transforms an event into a result for the user if not filtered.
func (wc *watchChan) transform(e *event) (res *watch.Event) {
	curObj, oldObj, err := wc.prepareObjs(e)
	if err != nil {
		glog.Errorf("failed to prepare current and previous objects: %v", err)
		wc.sendError(err)
		return nil
	}

	switch {
	case e.isDeleted:
		if !wc.filter(oldObj) {
			return nil
		}
		res = &watch.Event{
			Type:   watch.Deleted,
			Object: oldObj,
		}
	case e.isCreated:
		if !wc.filter(curObj) {
			return nil
		}
		res = &watch.Event{
			Type:   watch.Added,
			Object: curObj,
		}
	default:
		if wc.acceptAll() {
			res = &watch.Event{
				Type:   watch.Modified,
				Object: curObj,
			}
			return res
		}
		curObjPasses := wc.filter(curObj)
		oldObjPasses := wc.filter(oldObj)
		switch {
		case curObjPasses && oldObjPasses:
			res = &watch.Event{
				Type:   watch.Modified,
				Object: curObj,
			}
		case curObjPasses && !oldObjPasses:
			res = &watch.Event{
				Type:   watch.Added,
				Object: curObj,
			}
		case !curObjPasses && oldObjPasses:
			res = &watch.Event{
				Type:   watch.Deleted,
				Object: oldObj,
			}
		}
	}
	return res
}

func parseError(err error) *watch.Event {
	var status *metav1.Status
	switch {
	case err == etcdrpc.ErrCompacted:
		status = &metav1.Status{
			Status:  metav1.StatusFailure,
			Message: err.Error(),
			Code:    http.StatusGone,
			Reason:  metav1.StatusReasonExpired,
		}
	default:
		status = &metav1.Status{
			Status:  metav1.StatusFailure,
			Message: err.Error(),
			Code:    http.StatusInternalServerError,
			Reason:  metav1.StatusReasonInternalError,
		}
	}

	return &watch.Event{
		Type:   watch.Error,
		Object: status,
	}
}

func (wc *watchChan) sendError(err error) {
	select {
	case wc.errChan <- err:
	case <-wc.ctx.Done():
	}
}

func (wc *watchChan) sendEvent(e *event) {
	if len(wc.incomingEventChan) == incomingBufSize {
		glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
			"Probably caused by slow decoding, user not receiving fast, or other processing logic",
			incomingBufSize)
	}
	select {
	case wc.incomingEventChan <- e:
	case <-wc.ctx.Done():
	}
}

func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtime.Object, err error) {
	if !e.isDeleted {
		curObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, e.value, e.rev)
		if err != nil {
			return nil, nil, err
		}
	}
	// We need to decode prevValue, only if this is deletion event or
	// the underlying filter doesn't accept all objects (otherwise we
	// know that the filter for previous object will return true and
	// we need the object only to compute whether it was filtered out
	// before).
	if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) {
		// Note that this sends the *old* object with the etcd revision for the time at
		// which it gets deleted.
		oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, e.prevValue, e.rev)
		if err != nil {
			return nil, nil, err
		}
	}
	return curObj, oldObj, nil
}

func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (runtime.Object, error) {
	obj, err := runtime.Decode(codec, []byte(data))
	if err != nil {
		return nil, err
	}
	// ensure resource version is set on the object we load from etcd
	if err := versioner.UpdateObject(obj, uint64(rev)); err != nil {
		return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err)
	}
	return obj, nil
}
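Callers normally reach this watcher through the store defined in store.go (whose diff is suppressed above), which implements the generic storage interface. A minimal, hypothetical consumer loop, assuming that store's Watch takes a resource version string and a selection predicate as in the tests below, might look like this; the package and function names are made up for illustration:

package consumer

import (
	"fmt"

	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/storage"
	"k8s.io/kubernetes/pkg/watch"
)

// drainWatch reads events until the result channel closes and always calls
// Stop so that the watchChan's goroutines are reaped via its context.
func drainWatch(ctx context.Context, store storage.Interface, key string) error {
	w, err := store.Watch(ctx, key, "0", storage.Everything)
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		switch ev.Type {
		case watch.Added, watch.Modified, watch.Deleted:
			// The object already has its resource version set from the etcd revision.
			fmt.Printf("%s: %#v\n", ev.Type, ev.Object)
		case watch.Error:
			// For example, a 410 Gone status after the requested revision was compacted.
			return fmt.Errorf("watch error: %#v", ev.Object)
		}
	}
	return nil
}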
372  vendor/k8s.io/kubernetes/pkg/storage/etcd3/watcher_test.go  generated  vendored  Normal file
@@ -0,0 +1,372 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd3

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/integration"
	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/storage"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/watch"
)

func TestWatch(t *testing.T) {
	testWatch(t, false)
}

func TestWatchList(t *testing.T) {
	testWatch(t, true)
}

// It tests that
// - first occurrence of objects should notify Add event
// - update should trigger Modified event
// - update that gets filtered should trigger Deleted event
func testWatch(t *testing.T, recursive bool) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	podFoo := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	podBar := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}

	tests := []struct {
		key        string
		pred       storage.SelectionPredicate
		watchTests []*testWatchStruct
	}{{ // create a key
		key:        "/somekey-1",
		watchTests: []*testWatchStruct{{podFoo, true, watch.Added}},
		pred:       storage.Everything,
	}, { // create a key but obj gets filtered. Then update it with unfiltered obj
		key:        "/somekey-3",
		watchTests: []*testWatchStruct{{podFoo, false, ""}, {podBar, true, watch.Added}},
		pred: storage.SelectionPredicate{
			Label: labels.Everything(),
			Field: fields.ParseSelectorOrDie("metadata.name=bar"),
			GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
				pod := obj.(*api.Pod)
				return nil, fields.Set{"metadata.name": pod.Name}, nil
			},
		},
	}, { // update
		key:        "/somekey-4",
		watchTests: []*testWatchStruct{{podFoo, true, watch.Added}, {podBar, true, watch.Modified}},
		pred:       storage.Everything,
	}, { // delete because of being filtered
		key:        "/somekey-5",
		watchTests: []*testWatchStruct{{podFoo, true, watch.Added}, {podBar, true, watch.Deleted}},
		pred: storage.SelectionPredicate{
			Label: labels.Everything(),
			Field: fields.ParseSelectorOrDie("metadata.name!=bar"),
			GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
				pod := obj.(*api.Pod)
				return nil, fields.Set{"metadata.name": pod.Name}, nil
			},
		},
	}}
	for i, tt := range tests {
		w, err := store.watch(ctx, tt.key, "0", tt.pred, recursive)
		if err != nil {
			t.Fatalf("Watch failed: %v", err)
		}
		var prevObj *api.Pod
		for _, watchTest := range tt.watchTests {
			out := &api.Pod{}
			key := tt.key
			if recursive {
				key = key + "/item"
			}
			err := store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
				func(runtime.Object) (runtime.Object, error) {
					return watchTest.obj, nil
				}))
			if err != nil {
				t.Fatalf("GuaranteedUpdate failed: %v", err)
			}
			if watchTest.expectEvent {
				expectObj := out
				if watchTest.watchType == watch.Deleted {
					expectObj = prevObj
					expectObj.ResourceVersion = out.ResourceVersion
				}
				testCheckResult(t, i, watchTest.watchType, w, expectObj)
			}
			prevObj = out
		}
		w.Stop()
		testCheckStop(t, i, w)
	}
}

func TestDeleteTriggerWatch(t *testing.T) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	key, storedObj := testPropogateStore(ctx, t, store, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
	w, err := store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	if err := store.Delete(ctx, key, &api.Pod{}, nil); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}
	testCheckEventType(t, watch.Deleted, w)
}

// TestWatchFromZero tests that
// - watch from 0 should sync up and grab the object added before
// - watch from 0 is able to return events for objects whose previous version has been compacted
func TestWatchFromZero(t *testing.T) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	key, storedObj := testPropogateStore(ctx, t, store, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "ns"}})

	w, err := store.Watch(ctx, key, "0", storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	testCheckResult(t, 0, watch.Added, w, storedObj)
	w.Stop()

	// Update
	out := &api.Pod{}
	err = store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
		func(runtime.Object) (runtime.Object, error) {
			return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "ns", Annotations: map[string]string{"a": "1"}}}, nil
		}))
	if err != nil {
		t.Fatalf("GuaranteedUpdate failed: %v", err)
	}

	// Make sure when we watch from 0 we receive an ADDED event
	w, err = store.Watch(ctx, key, "0", storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	testCheckResult(t, 1, watch.Added, w, out)
	w.Stop()

	// Update again
	out = &api.Pod{}
	err = store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
		func(runtime.Object) (runtime.Object, error) {
			return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "ns"}}, nil
		}))
	if err != nil {
		t.Fatalf("GuaranteedUpdate failed: %v", err)
	}

	// Compact previous versions
	revToCompact, err := strconv.Atoi(out.ResourceVersion)
	if err != nil {
		t.Fatalf("Error converting %q to an int: %v", storedObj.ResourceVersion, err)
	}
	_, err = cluster.RandClient().Compact(ctx, int64(revToCompact), clientv3.WithCompactPhysical())
	if err != nil {
		t.Fatalf("Error compacting: %v", err)
	}

	// Make sure we can still watch from 0 and receive an ADDED event
	w, err = store.Watch(ctx, key, "0", storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	testCheckResult(t, 2, watch.Added, w, out)
}

// TestWatchFromNoneZero tests that
// - watch from non-0 should just watch changes after given version
func TestWatchFromNoneZero(t *testing.T) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	key, storedObj := testPropogateStore(ctx, t, store, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})

	w, err := store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	out := &api.Pod{}
	store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
		func(runtime.Object) (runtime.Object, error) {
			return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}, err
		}))
	testCheckResult(t, 0, watch.Modified, w, out)
}

func TestWatchError(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	invalidStore := newStore(cluster.RandClient(), false, &testCodec{testapi.Default.Codec()}, "")
	ctx := context.Background()
	w, err := invalidStore.Watch(ctx, "/abc", "0", storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	validStore := newStore(cluster.RandClient(), false, testapi.Default.Codec(), "")
	validStore.GuaranteedUpdate(ctx, "/abc", &api.Pod{}, true, nil, storage.SimpleUpdate(
		func(runtime.Object) (runtime.Object, error) {
			return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, nil
		}))
	testCheckEventType(t, watch.Error, w)
}

func TestWatchContextCancel(t *testing.T) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	canceledCtx, cancel := context.WithCancel(ctx)
	cancel()
	// When we watch with a canceled context, we should detect that the context was canceled.
	// We won't take it as an error and will also close the watcher.
	w, err := store.watcher.Watch(canceledCtx, "/abc", 0, false, storage.Everything)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case _, ok := <-w.ResultChan():
		if ok {
			t.Error("ResultChan() should be closed")
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("timeout after %v", wait.ForeverTestTimeout)
	}
}

func TestWatchErrResultNotBlockAfterCancel(t *testing.T) {
	origCtx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	ctx, cancel := context.WithCancel(origCtx)
	w := store.watcher.createWatchChan(ctx, "/abc", 0, false, storage.Everything)
	// make resultChan and errChan blocking to ensure ordering.
	w.resultChan = make(chan watch.Event)
	w.errChan = make(chan error)
	// The event flow goes like:
	// - first we send an error, it should block on resultChan.
	// - Then we cancel ctx. The blocking on resultChan should be freed up
	//   and run() goroutine should return.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		w.run()
		wg.Done()
	}()
	w.errChan <- fmt.Errorf("some error")
	cancel()
	wg.Wait()
}

func TestWatchDeleteEventObjectHaveLatestRV(t *testing.T) {
	ctx, store, cluster := testSetup(t)
	defer cluster.Terminate(t)
	key, storedObj := testPropogateStore(ctx, t, store, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})

	w, err := store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything)
	if err != nil {
		t.Fatalf("Watch failed: %v", err)
	}
	etcdW := cluster.RandClient().Watch(ctx, "/", clientv3.WithPrefix())

	if err := store.Delete(ctx, key, &api.Pod{}, &storage.Preconditions{}); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}

	e := <-w.ResultChan()
	watchedDeleteObj := e.Object.(*api.Pod)
	var wres clientv3.WatchResponse
	wres = <-etcdW

	watchedDeleteRev, err := storage.ParseWatchResourceVersion(watchedDeleteObj.ResourceVersion)
	if err != nil {
		t.Fatalf("ParseWatchResourceVersion failed: %v", err)
	}
	if int64(watchedDeleteRev) != wres.Events[0].Kv.ModRevision {
		t.Errorf("Object from delete event have version: %v, should be the same as etcd delete's mod rev: %d",
			watchedDeleteRev, wres.Events[0].Kv.ModRevision)
	}
}

type testWatchStruct struct {
	obj         *api.Pod
	expectEvent bool
	watchType   watch.EventType
}

type testCodec struct {
	runtime.Codec
}

func (c *testCodec) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
	return nil, nil, errors.New("Expected decoding failure")
}

func testCheckEventType(t *testing.T, expectEventType watch.EventType, w watch.Interface) {
	select {
	case res := <-w.ResultChan():
		if res.Type != expectEventType {
			t.Errorf("event type want=%v, get=%v", expectEventType, res.Type)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout)
	}
}

func testCheckResult(t *testing.T, i int, expectEventType watch.EventType, w watch.Interface, expectObj *api.Pod) {
	select {
	case res := <-w.ResultChan():
		if res.Type != expectEventType {
			t.Errorf("#%d: event type want=%v, get=%v", i, expectEventType, res.Type)
			return
		}
		if !reflect.DeepEqual(expectObj, res.Object) {
			t.Errorf("#%d: obj want=\n%#v\nget=\n%#v", i, expectObj, res.Object)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("#%d: time out after waiting %v on ResultChan", i, wait.ForeverTestTimeout)
	}
}

func testCheckStop(t *testing.T, i int, w watch.Interface) {
	select {
	case e, ok := <-w.ResultChan():
		if ok {
			var obj string
			switch e.Object.(type) {
			case *api.Pod:
				obj = e.Object.(*api.Pod).Name
			case *metav1.Status:
				obj = e.Object.(*metav1.Status).Message
			}
			t.Errorf("#%d: ResultChan should have been closed. Event: %s. Object: %s", i, e.Type, obj)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("#%d: time out after waiting 1s on ResultChan", i)
	}
}