forked from barak/tarpoon

Add glide.yaml and vendor deps

Author: Dalton Hubble, 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
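
For context, glide records a project's dependency constraints in a glide.yaml at the repository root and copies the resolved packages into vendor/, which is what produces a commit of this size. The sketch below shows roughly what such a file looks like; it is an illustration, not the glide.yaml from this commit (that diff is not shown here), and the package path and version are assumptions loosely inferred from the release_1_5 client paths in the vendored tree.

# Hypothetical glide.yaml sketch; not the committed file.
package: github.com/example/tarpoon  # assumed module path
import:
- package: k8s.io/kubernetes         # lands under vendor/k8s.io/kubernetes
  version: release-1.5               # assumed branch; the actual pin is not shown

Running glide install (or glide update) resolves these constraints, writes the exact revisions to glide.lock, and populates vendor/.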

vendor/k8s.io/kubernetes/test/integration/BUILD (generated, vendored, new file)

@@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/util/wait:go_default_library",
],
)

@@ -0,0 +1,348 @@
// +build integration,!no-etcd
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"errors"
"net/http"
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
"k8s.io/kubernetes/pkg/auth/authenticator"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/auth/user"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/plugin/pkg/admission/admit"
"k8s.io/kubernetes/test/integration/framework"
)
// Inject into master an authorizer that uses user info.
// TODO(etune): remove this test once a more comprehensive built-in authorizer is implemented.
type sarAuthorizer struct{}
func (sarAuthorizer) Authorize(a authorizer.Attributes) (bool, string, error) {
if a.GetUser().GetName() == "dave" {
return false, "no", errors.New("I'm sorry, Dave")
}
return true, "you're not dave", nil
}
func alwaysAlice(req *http.Request) (user.Info, bool, error) {
return &user.DefaultInfo{
Name: "alice",
}, true, nil
}
func TestSubjectAccessReview(t *testing.T) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
tests := []struct {
name string
sar *authorizationapi.SubjectAccessReview
expectedError string
expectedStatus authorizationapi.SubjectAccessReviewStatus
}{
{
name: "simple allow",
sar: &authorizationapi.SubjectAccessReview{
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
User: "alice",
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: true,
Reason: "you're not dave",
},
},
{
name: "simple deny",
sar: &authorizationapi.SubjectAccessReview{
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
User: "dave",
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: false,
Reason: "no",
EvaluationError: "I'm sorry, Dave",
},
},
{
name: "simple error",
sar: &authorizationapi.SubjectAccessReview{
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
},
},
expectedError: "at least one of user or group must be specified",
},
}
for _, test := range tests {
response, err := clientset.Authorization().SubjectAccessReviews().Create(test.sar)
switch {
case err == nil && len(test.expectedError) == 0:
case err != nil && strings.Contains(err.Error(), test.expectedError):
continue
case err != nil && len(test.expectedError) != 0:
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
default:
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
continue
}
if response.Status != test.expectedStatus {
t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
continue
}
}
}
func TestSelfSubjectAccessReview(t *testing.T) {
username := "alice"
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
return &user.DefaultInfo{Name: username}, true, nil
})
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
tests := []struct {
name string
username string
sar *authorizationapi.SelfSubjectAccessReview
expectedError string
expectedStatus authorizationapi.SubjectAccessReviewStatus
}{
{
name: "simple allow",
username: "alice",
sar: &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: true,
Reason: "you're not dave",
},
},
{
name: "simple deny",
username: "dave",
sar: &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: false,
Reason: "no",
EvaluationError: "I'm sorry, Dave",
},
},
}
for _, test := range tests {
username = test.username
response, err := clientset.Authorization().SelfSubjectAccessReviews().Create(test.sar)
switch {
case err == nil && len(test.expectedError) == 0:
case err != nil && strings.Contains(err.Error(), test.expectedError):
continue
case err != nil && len(test.expectedError) != 0:
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
default:
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
continue
}
if response.Status != test.expectedStatus {
t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
continue
}
}
}
func TestLocalSubjectAccessReview(t *testing.T) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
tests := []struct {
name string
namespace string
sar *authorizationapi.LocalSubjectAccessReview
expectedError string
expectedStatus authorizationapi.SubjectAccessReviewStatus
}{
{
name: "simple allow",
namespace: "foo",
sar: &authorizationapi.LocalSubjectAccessReview{
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
Namespace: "foo",
},
User: "alice",
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: true,
Reason: "you're not dave",
},
},
{
name: "simple deny",
namespace: "foo",
sar: &authorizationapi.LocalSubjectAccessReview{
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
Namespace: "foo",
},
User: "dave",
},
},
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
Allowed: false,
Reason: "no",
EvaluationError: "I'm sorry, Dave",
},
},
{
name: "conflicting namespace",
namespace: "foo",
sar: &authorizationapi.LocalSubjectAccessReview{
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
Namespace: "bar",
},
User: "dave",
},
},
expectedError: "must match metadata.namespace",
},
{
name: "missing namespace",
namespace: "foo",
sar: &authorizationapi.LocalSubjectAccessReview{
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
Spec: authorizationapi.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Verb: "list",
Group: api.GroupName,
Version: "v1",
Resource: "pods",
},
User: "dave",
},
},
expectedError: "must match metadata.namespace",
},
}
for _, test := range tests {
response, err := clientset.Authorization().LocalSubjectAccessReviews(test.namespace).Create(test.sar)
switch {
case err == nil && len(test.expectedError) == 0:
case err != nil && strings.Contains(err.Error(), test.expectedError):
continue
case err != nil && len(test.expectedError) != 0:
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
default:
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
continue
}
if response.Status != test.expectedStatus {
t.Errorf("%s: expected %#v, got %#v", test.name, test.expectedStatus, response.Status)
continue
}
}
}

File diff suppressed because it is too large.

@@ -0,0 +1,484 @@
// +build integration,!no-etcd
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apimachinery/registered"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/auth/authenticator"
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/auth/user"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/registry/generic"
"k8s.io/kubernetes/pkg/registry/rbac/clusterrole"
clusterroleetcd "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/etcd"
"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
clusterrolebindingetcd "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/etcd"
"k8s.io/kubernetes/pkg/registry/rbac/role"
roleetcd "k8s.io/kubernetes/pkg/registry/rbac/role/etcd"
"k8s.io/kubernetes/pkg/registry/rbac/rolebinding"
rolebindingetcd "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/etcd"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
"k8s.io/kubernetes/test/integration/framework"
)
func newFakeAuthenticator() authenticator.Request {
return bearertoken.New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
if token == "" {
return nil, false, errors.New("no bearer token found")
}
// Set the bearer token as the user name.
return &user.DefaultInfo{Name: token, UID: token}, true, nil
}))
}
func clientForUser(user string) *http.Client {
return &http.Client{
Transport: transport.NewBearerAuthRoundTripper(
user,
transport.DebugWrappers(http.DefaultTransport),
),
}
}
func clientsetForUser(user string, config *restclient.Config) clientset.Interface {
configCopy := *config
configCopy.BearerToken = user
return clientset.NewForConfigOrDie(&configCopy)
}
func newRBACAuthorizer(t *testing.T, superUser string, config *master.Config) authorizer.Authorizer {
newRESTOptions := func(resource string) generic.RESTOptions {
storageConfig, err := config.StorageFactory.NewConfig(rbacapi.Resource(resource))
if err != nil {
t.Fatalf("failed to get storage: %v", err)
}
return generic.RESTOptions{StorageConfig: storageConfig, Decorator: generic.UndecoratedStorage, ResourcePrefix: resource}
}
roleRegistry := role.AuthorizerAdapter{Registry: role.NewRegistry(roleetcd.NewREST(newRESTOptions("roles")))}
roleBindingRegistry := rolebinding.AuthorizerAdapter{Registry: rolebinding.NewRegistry(rolebindingetcd.NewREST(newRESTOptions("rolebindings")))}
clusterRoleRegistry := clusterrole.AuthorizerAdapter{Registry: clusterrole.NewRegistry(clusterroleetcd.NewREST(newRESTOptions("clusterroles")))}
clusterRoleBindingRegistry := clusterrolebinding.AuthorizerAdapter{Registry: clusterrolebinding.NewRegistry(clusterrolebindingetcd.NewREST(newRESTOptions("clusterrolebindings")))}
return rbac.New(roleRegistry, roleBindingRegistry, clusterRoleRegistry, clusterRoleBindingRegistry, superUser)
}
// bootstrapRoles are a set of RBAC roles which will be populated before the test.
type bootstrapRoles struct {
roles []rbacapi.Role
roleBindings []rbacapi.RoleBinding
clusterRoles []rbacapi.ClusterRole
clusterRoleBindings []rbacapi.ClusterRoleBinding
}
// bootstrap uses the provided client to create the bootstrap roles and role bindings.
//
// client should be authenticated as the RBAC super user.
func (b bootstrapRoles) bootstrap(client clientset.Interface) error {
for _, r := range b.clusterRoles {
_, err := client.Rbac().ClusterRoles().Create(&r)
if err != nil {
return fmt.Errorf("failed to make request: %v", err)
}
}
for _, r := range b.roles {
_, err := client.Rbac().Roles(r.Namespace).Create(&r)
if err != nil {
return fmt.Errorf("failed to make request: %v", err)
}
}
for _, r := range b.clusterRoleBindings {
_, err := client.Rbac().ClusterRoleBindings().Create(&r)
if err != nil {
return fmt.Errorf("failed to make request: %v", err)
}
}
for _, r := range b.roleBindings {
_, err := client.Rbac().RoleBindings(r.Namespace).Create(&r)
if err != nil {
return fmt.Errorf("failed to make request: %v", err)
}
}
return nil
}
// request is a test case which can be run against an API server.
type request struct {
// The username attempting to send the request.
user string
// Resource metadata
verb string
apiGroup string
resource string
namespace string
name string
// The actual resource.
body string
// The expected return status of this request.
expectedStatus int
}
func (r request) String() string {
return fmt.Sprintf("%s %s %s", r.user, r.verb, r.resource)
}
type statusCode int
func (s statusCode) String() string {
return fmt.Sprintf("%d %s", int(s), http.StatusText(int(s)))
}
// Declare a set of raw objects to use.
var (
aJob = `
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "pi"%s
},
"spec": {
"template": {
"metadata": {
"name": "a",
"labels": {
"name": "pijob"
}
},
"spec": {
"containers": [
{
"name": "pi",
"image": "perl",
"command": [
"perl",
"-Mbignum=bpi",
"-wle",
"print bpi(2000)"
]
}
],
"restartPolicy": "Never"
}
}
}
}
`
// aPod is referenced by the requests in TestRBAC below, but its declaration
// did not survive in this excerpt. Restored here as a minimal assumed pod
// manifest so the file compiles; the original manifest is not shown.
aPod = `
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "a"%s
},
"spec": {
"containers": [{
"name": "a",
"image": "test-image"
}]
}
}
`
podNamespace = `
{
"apiVersion": "` + registered.GroupOrDie(api.GroupName).GroupVersion.String() + `",
"kind": "Namespace",
"metadata": {
"name": "pod-namespace"%s
}
}
`
jobNamespace = `
{
"apiVersion": "` + registered.GroupOrDie(api.GroupName).GroupVersion.String() + `",
"kind": "Namespace",
"metadata": {
"name": "job-namespace"%s
}
}
`
forbiddenNamespace = `
{
"apiVersion": "` + registered.GroupOrDie(api.GroupName).GroupVersion.String() + `",
"kind": "Namespace",
"metadata": {
"name": "forbidden-namespace"%s
}
}
`
)
// Declare some PolicyRules beforehand.
var (
ruleAllowAll = rbacapi.NewRule("*").Groups("*").Resources("*").RuleOrDie()
ruleReadPods = rbacapi.NewRule("list", "get", "watch").Groups("").Resources("pods").RuleOrDie()
ruleWriteJobs = rbacapi.NewRule("*").Groups("batch").Resources("*").RuleOrDie()
)
func TestRBAC(t *testing.T) {
superUser := "admin"
tests := []struct {
bootstrapRoles bootstrapRoles
requests []request
}{
{
bootstrapRoles: bootstrapRoles{
clusterRoles: []rbacapi.ClusterRole{
{
ObjectMeta: api.ObjectMeta{Name: "allow-all"},
Rules: []rbacapi.PolicyRule{ruleAllowAll},
},
{
ObjectMeta: api.ObjectMeta{Name: "read-pods"},
Rules: []rbacapi.PolicyRule{ruleReadPods},
},
},
clusterRoleBindings: []rbacapi.ClusterRoleBinding{
{
ObjectMeta: api.ObjectMeta{Name: "read-pods"},
Subjects: []rbacapi.Subject{
{Kind: "User", Name: "pod-reader"},
},
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "read-pods"},
},
},
},
requests: []request{
// Create the namespace used later in the test
{superUser, "POST", "", "namespaces", "", "", podNamespace, http.StatusCreated},
{superUser, "GET", "", "pods", "", "", "", http.StatusOK},
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusNotFound},
{superUser, "POST", "", "pods", "pod-namespace", "", aPod, http.StatusCreated},
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusOK},
{"bob", "GET", "", "pods", "", "", "", http.StatusForbidden},
{"bob", "GET", "", "pods", "pod-namespace", "a", "", http.StatusForbidden},
{"pod-reader", "GET", "", "pods", "", "", "", http.StatusOK},
{"pod-reader", "POST", "", "pods", "pod-namespace", "", aPod, http.StatusForbidden},
},
},
{
bootstrapRoles: bootstrapRoles{
clusterRoles: []rbacapi.ClusterRole{
{
ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
Rules: []rbacapi.PolicyRule{ruleWriteJobs},
},
},
clusterRoleBindings: []rbacapi.ClusterRoleBinding{
{
ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer"}},
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
},
},
roleBindings: []rbacapi.RoleBinding{
{
ObjectMeta: api.ObjectMeta{Name: "write-jobs", Namespace: "job-namespace"},
Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
},
},
},
requests: []request{
// Create the namespaces used later in the test
{superUser, "POST", "", "namespaces", "", "", jobNamespace, http.StatusCreated},
{superUser, "POST", "", "namespaces", "", "", forbiddenNamespace, http.StatusCreated},
{"user-with-no-permissions", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusForbidden},
{"user-with-no-permissions", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusForbidden},
// job-writer-namespace cannot write to the "forbidden-namespace"
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
{"job-writer-namespace", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
// job-writer can write to any namespace
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusOK},
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusNotFound},
{"job-writer", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusCreated},
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusOK},
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "", "", http.StatusOK},
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusNotFound},
{"job-writer-namespace", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusCreated},
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusOK},
},
},
}
for i, tc := range tests {
// Create an API Server.
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
masterConfig.GenericConfig.AuthorizerRBACSuperUser = superUser
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
// Bootstrap the API Server with the test case's initial roles.
if err := tc.bootstrapRoles.bootstrap(clientsetForUser(superUser, clientConfig)); err != nil {
t.Errorf("case %d: failed to apply initial roles: %v", i, err)
continue
}
previousResourceVersion := make(map[string]float64)
for j, r := range tc.requests {
testGroup, ok := testapi.Groups[r.apiGroup]
if !ok {
t.Errorf("case %d %d: unknown api group %q, %s", i, j, r.apiGroup, r)
continue
}
path := testGroup.ResourcePath(r.resource, r.namespace, r.name)
var body io.Reader
if r.body != "" {
sub := ""
if r.verb == "PUT" {
// For update operations, insert previous resource version
if resVersion := previousResourceVersion[getPreviousResourceVersionKey(path, "")]; resVersion != 0 {
sub += fmt.Sprintf(",\"resourceVersion\": \"%v\"", resVersion)
}
}
// For any creation requests, add the namespace to the object meta.
if r.verb == "POST" || r.verb == "PUT" {
if r.namespace != "" {
sub += fmt.Sprintf(",\"namespace\": %q", r.namespace)
}
}
body = strings.NewReader(fmt.Sprintf(r.body, sub))
}
req, err := http.NewRequest(r.verb, s.URL+path, body)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
func() {
reqDump, err := httputil.DumpRequest(req, true)
if err != nil {
t.Fatalf("failed to dump request: %v", err)
return
}
resp, err := clientForUser(r.user).Do(req)
if err != nil {
t.Errorf("case %d, req %d: failed to make request: %v", i, j, err)
return
}
defer resp.Body.Close()
respDump, err := httputil.DumpResponse(resp, true)
if err != nil {
t.Fatalf("failed to dump response: %v", err)
return
}
if resp.StatusCode != r.expectedStatus {
// When debugging is on, dump the entire request and response. Very helpful for
// debugging malformed test cases.
//
// To turn on debugging, use the '-args' flag.
//
// go test -v -tags integration -run RBAC -args -v 10
//
glog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump)
t.Errorf("case %d, req %d: %s expected %q got %q", i, j, r, statusCode(r.expectedStatus), statusCode(resp.StatusCode))
}
b, _ := ioutil.ReadAll(resp.Body)
if r.verb == "POST" && (resp.StatusCode/100) == 2 {
// For successful create operations, extract resourceVersion
id, currentResourceVersion, err := parseResourceVersion(b)
if err == nil {
key := getPreviousResourceVersionKey(path, id)
previousResourceVersion[key] = currentResourceVersion
} else {
t.Logf("error in trying to extract resource version: %s", err)
}
}
}()
}
}
}
func TestBootstrapping(t *testing.T) {
superUser := "admin"
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
masterConfig.GenericConfig.AuthorizerRBACSuperUser = superUser
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(clusterRoles.Items) == 0 {
t.Fatalf("missing cluster roles")
}
for _, clusterRole := range clusterRoles.Items {
if clusterRole.Name == "cluster-admin" {
return
}
}
t.Errorf("missing cluster-admin: %v", clusterRoles)
healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthooks/rbac/bootstrap-roles").DoRaw()
if err != nil {
t.Error(err)
}
t.Errorf("bootstrap-roles poststarthook output: %s", string(healthBytes))
}

@@ -0,0 +1,25 @@
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "test-controller",
"namespace": "test",
"labels": {"name": "test-controller"}
},
"spec": {
"replicas": 0,
"selector": {"name": "test-pod"},
"template": {
"metadata": {
"namespace": "test",
"labels": {"name": "test-pod"}
},
"spec": {
"containers": [{
"name": "test-container",
"image": "gcr.io/google_containers/pause-amd64:3.0"
}]
}
}
}
}

File diff suppressed because it is too large.

@@ -0,0 +1,158 @@
// +build integration,!no-etcd
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/test/integration/framework"
)
func TestDynamicClient(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("dynamic-client", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
gv := &registered.GroupOrDie(v1.GroupName).GroupVersion
config := &restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: gv},
}
client := clientset.NewForConfigOrDie(config)
dynamicClient, err := dynamic.NewClient(config)
if err != nil {
t.Fatalf("unexpected error creating dynamic client: %v", err)
}
// Find the Pod resource
resources, err := client.Discovery().ServerResourcesForGroupVersion(gv.String())
if err != nil {
t.Fatalf("unexpected error listing resources: %v", err)
}
var resource metav1.APIResource
for _, r := range resources.APIResources {
if r.Kind == "Pod" {
resource = r
break
}
}
if len(resource.Name) == 0 {
t.Fatalf("could not find the pod resource in group/version %q", gv.String())
}
// Create a Pod with the normal client
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
GenerateName: "test",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test",
Image: "test-image",
},
},
},
}
actual, err := client.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("unexpected error when creating pod: %v", err)
}
// check dynamic list
obj, err := dynamicClient.Resource(&resource, ns.Name).List(&v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
unstructuredList, ok := obj.(*runtime.UnstructuredList)
if !ok {
t.Fatalf("expected *runtime.UnstructuredList, got %#v", obj)
}
if len(unstructuredList.Items) != 1 {
t.Fatalf("expected one pod, got %d", len(unstructuredList.Items))
}
got, err := unstructuredToPod(unstructuredList.Items[0])
if err != nil {
t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
}
if !reflect.DeepEqual(actual, got) {
t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
}
// check dynamic get
unstruct, err := dynamicClient.Resource(&resource, ns.Name).Get(actual.Name)
if err != nil {
t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
}
got, err = unstructuredToPod(unstruct)
if err != nil {
t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
}
if !reflect.DeepEqual(actual, got) {
t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
}
// delete the pod dynamically
err = dynamicClient.Resource(&resource, ns.Name).Delete(actual.Name, nil)
if err != nil {
t.Fatalf("unexpected error when deleting pod: %v", err)
}
list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
if len(list.Items) != 0 {
t.Fatalf("expected zero pods, got %d", len(list.Items))
}
}
func unstructuredToPod(obj *runtime.Unstructured) (*v1.Pod, error) {
json, err := runtime.Encode(runtime.UnstructuredJSONScheme, obj)
if err != nil {
return nil, err
}
pod := new(v1.Pod)
err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
pod.Kind = ""
pod.APIVersion = ""
return pod, err
}

@@ -0,0 +1,125 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
// This file tests use of the configMap API resource.
import (
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
// TestConfigMap tests apiserver-side behavior of creation of ConfigMaps and pods that consume them.
func TestConfigMap(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("config-map", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
DoTestConfigMap(t, client, ns)
}
func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
cfg := v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Name: "configmap",
Namespace: ns.Name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
if _, err := client.Core().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil {
t.Errorf("unable to create test configMap: %v", err)
}
defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "uses-configmap",
Namespace: ns.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "configmap",
},
Key: "data-1",
},
},
},
{
Name: "CONFIG_DATA_2",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "configmap",
},
Key: "data-2",
},
},
}, {
Name: "CONFIG_DATA_3",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "configmap",
},
Key: "data-3",
},
},
},
},
},
},
},
}
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Core().ConfigMaps(ns).Delete(name, nil); err != nil {
t.Errorf("unable to delete ConfigMap %v: %v", name, err)
}
}

@@ -0,0 +1,116 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discoverysummarizer
import (
"fmt"
"net/http"
"testing"
"time"
"k8s.io/kubernetes/cmd/kubernetes-discovery/discoverysummarizer"
"k8s.io/kubernetes/examples/apiserver"
)
func waitForServerUp(serverURL string) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
resp, err := http.Get(serverURL)
if err == nil {
resp.Body.Close()
return nil
}
}
return fmt.Errorf("waiting for server timed out")
}
func testResponse(t *testing.T, serverURL, path string, expectedStatusCode int) {
response, err := http.Get(serverURL + path)
if err != nil {
t.Errorf("unexpected error in GET %s: %v", path, err)
return
}
defer response.Body.Close()
if response.StatusCode != expectedStatusCode {
t.Errorf("unexpected status code: %v, expected: %v", response.StatusCode, expectedStatusCode)
}
}
func runDiscoverySummarizer(t *testing.T) string {
configFilePath := "../../../cmd/kubernetes-discovery/config.json"
port := "9090"
serverURL := "http://localhost:" + port
s, err := discoverysummarizer.NewDiscoverySummarizer(configFilePath)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
go func() {
if err := s.Run(port); err != nil {
t.Fatalf("error in bringing up the server: %v", err)
}
}()
if err := waitForServerUp(serverURL); err != nil {
t.Fatalf("%v", err)
}
return serverURL
}
func runAPIServer(t *testing.T, stopCh <-chan struct{}) string {
serverRunOptions := apiserver.NewServerRunOptions()
// Change the ports, because otherwise it will fail if examples/apiserver/apiserver_test and this are run in parallel.
serverRunOptions.SecureServing.ServingOptions.BindPort = 6443 + 3
serverRunOptions.InsecureServing.BindPort = 8080 + 3
go func() {
if err := serverRunOptions.Run(stopCh); err != nil {
t.Fatalf("Error in bringing up the example apiserver: %v", err)
}
}()
serverURL := fmt.Sprintf("http://localhost:%d", serverRunOptions.InsecureServing.BindPort)
if err := waitForServerUp(serverURL); err != nil {
t.Fatalf("%v", err)
}
return serverURL
}
// Runs a discovery summarizer server and tests that all endpoints work as expected.
func TestRunDiscoverySummarizer(t *testing.T) {
discoveryURL := runDiscoverySummarizer(t)
// Test /api path.
// No backend server is running yet, so the summarizer returns 502 Bad Gateway.
testResponse(t, discoveryURL, "/api", http.StatusBadGateway)
// Test /apis path.
// No backend server is running yet, so the summarizer returns 502 Bad Gateway.
testResponse(t, discoveryURL, "/apis", http.StatusBadGateway)
// Test a random path, which should give a 404.
testResponse(t, discoveryURL, "/randomPath", http.StatusNotFound)
// Run the APIServer now to test the good case.
stopCh := make(chan struct{})
runAPIServer(t, stopCh)
defer close(stopCh)
// Test /api path.
// The example apiserver is now up, so we expect 200 OK.
testResponse(t, discoveryURL, "/api", http.StatusOK)
// Test /apis path.
// The example apiserver is now up, so we expect 200 OK.
testResponse(t, discoveryURL, "/apis", http.StatusOK)
// Test a random path, which should give a 404.
testResponse(t, discoveryURL, "/randomPath", http.StatusNotFound)
}

vendor/k8s.io/kubernetes/test/integration/doc.go (generated, vendored, new file)

@@ -0,0 +1,20 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package integration provides integration tests for Kubernetes. Run them by
// passing the integration build tag to `go test`. Some tests require a running
// etcd or Docker installation on the system; those can be skipped with the
// no-etcd and no-docker build tags.
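//
// For example, an illustrative invocation that runs the suite while skipping
// the etcd-dependent tests (adjust the package path to your checkout):
//
//	go test -tags 'integration no-etcd' k8s.io/kubernetes/test/integration/...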
package integration // import "k8s.io/kubernetes/test/integration"

@@ -0,0 +1,180 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"testing"
"time"
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1"
"github.com/golang/glog"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/examples/apiserver"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
var groupVersion = v1.SchemeGroupVersion
var groupVersionForDiscovery = metav1.GroupVersionForDiscovery{
GroupVersion: groupVersion.String(),
Version: groupVersion.Version,
}
func TestRunServer(t *testing.T) {
serverIP := fmt.Sprintf("http://localhost:%d", apiserver.InsecurePort)
stopCh := make(chan struct{})
go func() {
if err := apiserver.NewServerRunOptions().Run(stopCh); err != nil {
t.Fatalf("Error in bringing up the server: %v", err)
}
}()
defer close(stopCh)
if err := waitForApiserverUp(serverIP); err != nil {
t.Fatalf("%v", err)
}
testSwaggerSpec(t, serverIP)
testAPIGroupList(t, serverIP)
testAPIGroup(t, serverIP)
testAPIResourceList(t, serverIP)
}
func TestRunSecureServer(t *testing.T) {
serverIP := fmt.Sprintf("https://localhost:%d", apiserver.SecurePort)
stopCh := make(chan struct{})
go func() {
options := apiserver.NewServerRunOptions()
options.InsecureServing.BindPort = 0
options.SecureServing.ServingOptions.BindPort = apiserver.SecurePort
if err := options.Run(stopCh); err != nil {
t.Fatalf("Error in bringing up the server: %v", err)
}
}()
defer close(stopCh)
if err := waitForApiserverUp(serverIP); err != nil {
t.Fatalf("%v", err)
}
testSwaggerSpec(t, serverIP)
testAPIGroupList(t, serverIP)
testAPIGroup(t, serverIP)
testAPIResourceList(t, serverIP)
}
func waitForApiserverUp(serverIP string) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
glog.Errorf("Waiting for: %v", serverIP)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
resp, err := client.Get(serverIP)
if err == nil {
resp.Body.Close()
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
func readResponse(serverURL string) ([]byte, error) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
response, err := client.Get(serverURL)
if err != nil {
glog.Errorf("http get error: %v", err)
return nil, fmt.Errorf("Error in fetching %s: %v", serverURL, err)
}
defer response.Body.Close()
glog.Errorf("http get response code: %d", response.StatusCode)
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status: %d for URL: %s, expected status: %d", response.StatusCode, serverURL, http.StatusOK)
}
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("Error reading response from %s: %v", serverURL, err)
}
return contents, nil
}
func testSwaggerSpec(t *testing.T, serverIP string) {
serverURL := serverIP + "/swaggerapi"
_, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
}
func testAPIGroupList(t *testing.T, serverIP string) {
serverURL := serverIP + "/apis"
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiGroupList metav1.APIGroupList
err = json.Unmarshal(contents, &apiGroupList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, 1, len(apiGroupList.Groups))
assert.Equal(t, apiGroupList.Groups[0].Name, groupVersion.Group)
assert.Equal(t, 1, len(apiGroupList.Groups[0].Versions))
assert.Equal(t, apiGroupList.Groups[0].Versions[0], groupVersionForDiscovery)
assert.Equal(t, apiGroupList.Groups[0].PreferredVersion, groupVersionForDiscovery)
}
func testAPIGroup(t *testing.T, serverIP string) {
serverURL := serverIP + "/apis/testgroup.k8s.io"
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiGroup metav1.APIGroup
err = json.Unmarshal(contents, &apiGroup)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, apiGroup.APIVersion, groupVersion.Version)
assert.Equal(t, apiGroup.Name, groupVersion.Group)
assert.Equal(t, 1, len(apiGroup.Versions))
assert.Equal(t, apiGroup.Versions[0].GroupVersion, groupVersion.String())
assert.Equal(t, apiGroup.Versions[0].Version, groupVersion.Version)
assert.Equal(t, apiGroup.Versions[0], apiGroup.PreferredVersion)
}
func testAPIResourceList(t *testing.T, serverIP string) {
serverURL := serverIP + "/apis/testgroup.k8s.io/v1"
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiResourceList metav1.APIResourceList
err = json.Unmarshal(contents, &apiResourceList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, apiResourceList.APIVersion, groupVersion.Version)
assert.Equal(t, apiResourceList.GroupVersion, groupVersion.String())
assert.Equal(t, 1, len(apiResourceList.APIResources))
assert.Equal(t, apiResourceList.APIResources[0].Name, "testtypes")
assert.True(t, apiResourceList.APIResources[0].Namespaced)
}

@@ -0,0 +1,382 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"regexp"
"testing"
"time"
"github.com/stretchr/testify/assert"
fed_v1b1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/cmd/federation-apiserver/app"
"k8s.io/kubernetes/federation/cmd/federation-apiserver/app/options"
"k8s.io/kubernetes/pkg/api/v1"
ext_v1b1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime/schema"
)
func TestLongRunningRequestRegexp(t *testing.T) {
regexp := regexp.MustCompile(options.NewServerRunOptions().GenericServerRunOptions.LongRunningRequestRE)
dontMatch := []string{
"/api/v1/watch-namespace/",
"/api/v1/namespace-proxy/",
"/api/v1/namespace-watch",
"/api/v1/namespace-proxy",
"/api/v1/namespace-portforward/pods",
"/api/v1/portforward/pods",
". anything",
"/ that",
}
doMatch := []string{
"/api/v1/pods/watch",
"/api/v1/watch/stuff",
"/api/v1/default/service/proxy",
"/api/v1/pods/proxy/path/to/thing",
"/api/v1/namespaces/myns/pods/mypod/log",
"/api/v1/namespaces/myns/pods/mypod/logs",
"/api/v1/namespaces/myns/pods/mypod/portforward",
"/api/v1/namespaces/myns/pods/mypod/exec",
"/api/v1/namespaces/myns/pods/mypod/attach",
"/api/v1/namespaces/myns/pods/mypod/log/",
"/api/v1/namespaces/myns/pods/mypod/logs/",
"/api/v1/namespaces/myns/pods/mypod/portforward/",
"/api/v1/namespaces/myns/pods/mypod/exec/",
"/api/v1/namespaces/myns/pods/mypod/attach/",
"/api/v1/watch/namespaces/myns/pods",
}
for _, path := range dontMatch {
if regexp.MatchString(path) {
t.Errorf("path should not match the regexp but did: %s", path)
}
}
for _, path := range doMatch {
if !regexp.MatchString(path) {
t.Errorf("path should match the regexp but did not: %s", path)
}
}
}
var securePort = 6443 + 2
var insecurePort = 8080 + 2
var serverIP = fmt.Sprintf("http://localhost:%v", insecurePort)
var groupVersions = []schema.GroupVersion{
fed_v1b1.SchemeGroupVersion,
ext_v1b1.SchemeGroupVersion,
}
func TestRun(t *testing.T) {
s := options.NewServerRunOptions()
s.SecureServing.ServingOptions.BindPort = securePort
s.InsecureServing.BindPort = insecurePort
_, ipNet, _ := net.ParseCIDR("10.10.10.0/24")
s.GenericServerRunOptions.ServiceClusterIPRange = *ipNet
s.Etcd.StorageConfig.ServerList = []string{"http://localhost:2379"}
go func() {
if err := app.Run(s); err != nil {
t.Fatalf("Error in bringing up the server: %v", err)
}
}()
if err := waitForApiserverUp(); err != nil {
t.Fatalf("%v", err)
}
testSwaggerSpec(t)
testSupport(t)
testAPIGroupList(t)
testAPIGroup(t)
testAPIResourceList(t)
}
func waitForApiserverUp() error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
resp, err := http.Get(serverIP)
if err == nil {
resp.Body.Close()
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
func readResponse(serverURL string) ([]byte, error) {
response, err := http.Get(serverURL)
if err != nil {
return nil, fmt.Errorf("Error in fetching %s: %v", serverURL, err)
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status: %d for URL: %s, expected status: %d", response.StatusCode, serverURL, http.StatusOK)
}
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("Error reading response from %s: %v", serverURL, err)
}
return contents, nil
}
func testSwaggerSpec(t *testing.T) {
serverURL := serverIP + "/swaggerapi"
_, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
}
func testSupport(t *testing.T) {
serverURL := serverIP + "/version"
_, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
}
func findGroup(groups []metav1.APIGroup, groupName string) *metav1.APIGroup {
for _, group := range groups {
if group.Name == groupName {
return &group
}
}
return nil
}
func testAPIGroupList(t *testing.T) {
groupVersionForDiscoveryMap := make(map[string]metav1.GroupVersionForDiscovery)
for _, groupVersion := range groupVersions {
groupVersionForDiscoveryMap[groupVersion.Group] = metav1.GroupVersionForDiscovery{
GroupVersion: groupVersion.String(),
Version: groupVersion.Version,
}
}
serverURL := serverIP + "/apis"
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiGroupList metav1.APIGroupList
err = json.Unmarshal(contents, &apiGroupList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
for _, groupVersion := range groupVersions {
found := findGroup(apiGroupList.Groups, groupVersion.Group)
assert.NotNil(t, found)
assert.Equal(t, groupVersion.Group, found.Name)
assert.Equal(t, 1, len(found.Versions))
groupVersionForDiscovery := groupVersionForDiscoveryMap[groupVersion.Group]
assert.Equal(t, groupVersionForDiscovery, found.Versions[0])
assert.Equal(t, groupVersionForDiscovery, found.PreferredVersion)
}
}
func testAPIGroup(t *testing.T) {
for _, groupVersion := range groupVersions {
serverURL := serverIP + "/apis/" + groupVersion.Group
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiGroup metav1.APIGroup
err = json.Unmarshal(contents, &apiGroup)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
// empty APIVersion for extensions group
if groupVersion.Group == "extensions" {
assert.Equal(t, "", apiGroup.APIVersion)
} else {
assert.Equal(t, "v1", apiGroup.APIVersion)
}
assert.Equal(t, apiGroup.Name, groupVersion.Group)
assert.Equal(t, 1, len(apiGroup.Versions))
assert.Equal(t, groupVersion.String(), apiGroup.Versions[0].GroupVersion)
assert.Equal(t, groupVersion.Version, apiGroup.Versions[0].Version)
assert.Equal(t, apiGroup.PreferredVersion, apiGroup.Versions[0])
}
testCoreAPIGroup(t)
}
func testCoreAPIGroup(t *testing.T) {
serverURL := serverIP + "/api"
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiVersions metav1.APIVersions
err = json.Unmarshal(contents, &apiVersions)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, 1, len(apiVersions.Versions))
assert.Equal(t, "v1", apiVersions.Versions[0])
assert.NotEmpty(t, apiVersions.ServerAddressByClientCIDRs)
}
func findResource(resources []metav1.APIResource, resourceName string) *metav1.APIResource {
for _, resource := range resources {
if resource.Name == resourceName {
return &resource
}
}
return nil
}
func testAPIResourceList(t *testing.T) {
testFederationResourceList(t)
testCoreResourceList(t)
testExtensionsResourceList(t)
}
func testFederationResourceList(t *testing.T) {
serverURL := serverIP + "/apis/" + fed_v1b1.SchemeGroupVersion.String()
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiResourceList metav1.APIResourceList
err = json.Unmarshal(contents, &apiResourceList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, "v1", apiResourceList.APIVersion)
assert.Equal(t, fed_v1b1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
// Assert that there are exactly 2 resources.
assert.Equal(t, 2, len(apiResourceList.APIResources))
found := findResource(apiResourceList.APIResources, "clusters")
assert.NotNil(t, found)
assert.False(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "clusters/status")
assert.NotNil(t, found)
assert.False(t, found.Namespaced)
}
func testCoreResourceList(t *testing.T) {
serverURL := serverIP + "/api/" + v1.SchemeGroupVersion.String()
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiResourceList metav1.APIResourceList
err = json.Unmarshal(contents, &apiResourceList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
assert.Equal(t, "", apiResourceList.APIVersion)
assert.Equal(t, v1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
// Assert that there are exactly 8 resources.
assert.Equal(t, 8, len(apiResourceList.APIResources))
// Verify services.
found := findResource(apiResourceList.APIResources, "services")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "services/status")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify namespaces.
found = findResource(apiResourceList.APIResources, "namespaces")
assert.NotNil(t, found)
assert.False(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "namespaces/status")
assert.NotNil(t, found)
assert.False(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "namespaces/finalize")
assert.NotNil(t, found)
assert.False(t, found.Namespaced)
// Verify events.
found = findResource(apiResourceList.APIResources, "events")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify secrets.
found = findResource(apiResourceList.APIResources, "secrets")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify config maps.
found = findResource(apiResourceList.APIResources, "configmaps")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
}
func testExtensionsResourceList(t *testing.T) {
serverURL := serverIP + "/apis/" + ext_v1b1.SchemeGroupVersion.String()
contents, err := readResponse(serverURL)
if err != nil {
t.Fatalf("%v", err)
}
var apiResourceList metav1.APIResourceList
err = json.Unmarshal(contents, &apiResourceList)
if err != nil {
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
}
// empty APIVersion for extensions group
assert.Equal(t, "", apiResourceList.APIVersion)
assert.Equal(t, ext_v1b1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
// Assert that there are exactly 11 resources.
assert.Equal(t, 11, len(apiResourceList.APIResources))
// Verify replicasets.
found := findResource(apiResourceList.APIResources, "replicasets")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "replicasets/status")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "replicasets/scale")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify ingress.
found = findResource(apiResourceList.APIResources, "ingresses")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "ingresses/status")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify daemonsets.
found = findResource(apiResourceList.APIResources, "daemonsets")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "daemonsets/status")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
// Verify deployments.
found = findResource(apiResourceList.APIResources, "deployments")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "deployments/status")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "deployments/scale")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
found = findResource(apiResourceList.APIResources, "deployments/rollback")
assert.NotNil(t, found)
assert.True(t, found.Namespaced)
}

@@ -0,0 +1,69 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"master_utils.go",
"perf_utils.go",
"serializer.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/apps/v1beta1:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/certificates/v1alpha1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/policy/v1alpha1:go_default_library",
"//pkg/apis/rbac/v1alpha1:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apiserver/authenticator:go_default_library",
"//pkg/auth/authenticator:go_default_library",
"//pkg/auth/authorizer:go_default_library",
"//pkg/auth/authorizer/union:go_default_library",
"//pkg/auth/user:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/genericapiserver:go_default_library",
"//pkg/genericapiserver/authorizer:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/runtime/serializer/versioning:go_default_library",
"//pkg/storage/storagebackend:go_default_library",
"//pkg/util/env:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/watch:go_default_library",
"//plugin/pkg/admission/admit:go_default_library",
"//plugin/pkg/auth/authenticator/request/union:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/go-openapi/spec",
"//vendor:github.com/golang/glog",
"//vendor:github.com/pborman/uuid",
],
)

File diff suppressed because it is too large

View file

@ -0,0 +1,103 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy []testutils.CountToStrategy
nodeNamePrefix string
}
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for _, v := range p.countToStrategy {
numNodes += v.Count
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &v1.Node{
ObjectMeta: v1.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: v1.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
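// Illustrative sketch of wiring the preparer, assuming CountToStrategy and a
// TrivialNodePrepareStrategy from k8s.io/kubernetes/test/utils; the snippet
// below would run inside a test with a ready clientset.
//
//    preparer := NewIntegrationTestNodePreparer(
//        client,
//        []testutils.CountToStrategy{{Count: 10, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
//        "sample-node-",
//    )
//    if err := preparer.PrepareNodes(); err != nil {
//        glog.Fatalf("preparing nodes failed: %v", err)
//    }
//    defer preparer.CleanupNodes()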

View file

@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/serializer/versioning"
)
// NewSingleContentTypeSerializer wraps a serializer in a NegotiatedSerializer that handles one content type
func NewSingleContentTypeSerializer(scheme *runtime.Scheme, info runtime.SerializerInfo) runtime.StorageSerializer {
return &wrappedSerializer{
scheme: scheme,
info: info,
}
}
type wrappedSerializer struct {
scheme *runtime.Scheme
info runtime.SerializerInfo
}
var _ runtime.StorageSerializer = &wrappedSerializer{}
func (s *wrappedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
return []runtime.SerializerInfo{s.info}
}
func (s *wrappedSerializer) UniversalDeserializer() runtime.Decoder {
return s.info.Serializer
}
func (s *wrappedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil)
}
func (s *wrappedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv)
}
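// Illustrative sketch, assuming the SerializerInfoForMediaType helper from
// pkg/runtime and a codec factory from pkg/runtime/serializer: wrap a
// scheme's JSON serializer so storage negotiates exactly one content type.
//
//    scheme := runtime.NewScheme()
//    codecs := serializer.NewCodecFactory(scheme)
//    info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), "application/json")
//    if !ok {
//        panic("no JSON serializer registered")
//    }
//    ss := NewSingleContentTypeSerializer(scheme, info)
//    _ = ss.SupportedMediaTypes() // exactly one entry: the JSON info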

View file

@ -0,0 +1,477 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"net/http/httptest"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/golang/glog"
dto "github.com/prometheus/client_model/go"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
func getOrphanOptions() *v1.DeleteOptions {
var trueVar = true
return &v1.DeleteOptions{OrphanDependents: &trueVar}
}
func getNonOrphanOptions() *v1.DeleteOptions {
var falseVar = false
return &v1.DeleteOptions{OrphanDependents: &falseVar}
}
const garbageCollectedPodName = "test.pod.1"
const independentPodName = "test.pod.2"
const oneValidOwnerPodName = "test.pod.3"
const toBeDeletedRCName = "test.rc.1"
const remainingRCName = "test.rc.2"
func newPod(podName, podNamespace string, ownerReferences []v1.OwnerReference) *v1.Pod {
for i := 0; i < len(ownerReferences); i++ {
if len(ownerReferences[i].Kind) == 0 {
ownerReferences[i].Kind = "ReplicationController"
}
ownerReferences[i].APIVersion = "v1"
}
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
OwnerReferences: ownerReferences,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
}
}
func newOwnerRC(name, namespace string) *v1.ReplicationController {
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"name": "test"},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": "test"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
},
},
}
}
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false
masterConfig.GenericConfig.EnableGarbageCollection = true
_, s := framework.RunAMaster(masterConfig)
clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
groupVersionResources, err := clientSet.Discovery().ServerPreferredResources()
if err != nil {
t.Fatalf("Failed to get supported resources from server: %v", err)
}
config := &restclient.Config{Host: s.URL}
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), groupVersionResources)
if err != nil {
t.Fatalf("Failed to create garbage collector")
}
return s, gc, clientSet
}
// This test simulates the cascading deletion.
func TestCascadingDeletion(t *testing.T) {
glog.V(6).Infof("TestCascadingDeletion starts")
defer glog.V(6).Infof("TestCascadingDeletion ends")
s, gc, clientSet := setup(t)
defer s.Close()
ns := framework.CreateTestingNamespace("gc-cascading-deletion", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
podClient := clientSet.Core().Pods(ns.Name)
toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != 2 {
t.Fatalf("Expect only 2 replication controller")
}
// this pod should be deleted in the cascade.
pod := newPod(garbageCollectedPodName, ns.Name, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// this pod shouldn't be deleted in the cascade, because it still has a valid owner reference.
pod = newPod(oneValidOwnerPodName, ns.Name, []v1.OwnerReference{
{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName},
{UID: remainingRC.ObjectMeta.UID, Name: remainingRCName},
})
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// this pod shouldn't be deleted in the cascade, because it doesn't have an owner.
pod = newPod(independentPodName, ns.Name, []v1.OwnerReference{})
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// list pods to verify the setup
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
if len(pods.Items) != 3 {
t.Fatalf("Expect only 3 pods")
}
stopCh := make(chan struct{})
go gc.Run(5, stopCh)
defer close(stopCh)
// delete one of the replication controllers
if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
}
// sometimes the deletion of the RC takes a long time to be observed by
// the gc, so wait for the garbage collector to observe the deletion of
// the toBeDeletedRC
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return !gc.GraphHasUID([]types.UID{toBeDeletedRC.ObjectMeta.UID}), nil
}); err != nil {
t.Fatal(err)
}
if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
}
// check that the garbage collector doesn't delete pods it shouldn't.
if _, err := podClient.Get(independentPodName); err != nil {
t.Fatal(err)
}
if _, err := podClient.Get(oneValidOwnerPodName); err != nil {
t.Fatal(err)
}
}
// This test simulates the case where an object is created with an owner that
// doesn't exist. It verifies the GC will delete such an object.
func TestCreateWithNonExistentOwner(t *testing.T) {
s, gc, clientSet := setup(t)
defer s.Close()
ns := framework.CreateTestingNamespace("gc-non-existing-owner", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
podClient := clientSet.Core().Pods(ns.Name)
pod := newPod(garbageCollectedPodName, ns.Name, []v1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}})
_, err := podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// list pods to verify the setup
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
if len(pods.Items) != 1 {
t.Fatalf("Expect only 1 pod")
}
stopCh := make(chan struct{})
go gc.Run(5, stopCh)
defer close(stopCh)
// wait for the garbage collector to delete the pod
if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
}
}
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *v1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
defer wg.Done()
rcClient := clientSet.Core().ReplicationControllers(namespace)
podClient := clientSet.Core().Pods(namespace)
// create rc.
rcName := "test.rc." + nameSuffix
rc := newOwnerRC(rcName, namespace)
rc.ObjectMeta.Finalizers = initialFinalizers
rc, err := rcClient.Create(rc)
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
rcUIDs <- rc.ObjectMeta.UID
// create pods.
var podUIDs []types.UID
for j := 0; j < 3; j++ {
podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j)
pod := newPod(podName, namespace, []v1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
podUIDs = append(podUIDs, pod.ObjectMeta.UID)
}
orphan := (options != nil && options.OrphanDependents != nil && *options.OrphanDependents) || (options == nil && len(initialFinalizers) != 0 && initialFinalizers[0] == api.FinalizerOrphan)
// if we intend to orphan the pods, we need to wait for the gc to observe the
// creation of the pods; otherwise, if the deletion of the RC is observed
// before the creation of the pods, the pods will not be orphaned.
if orphan {
wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
}
// delete the rc
if err := rcClient.Delete(rc.ObjectMeta.Name, options); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
}
}
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
rcClient := clientSet.Core().ReplicationControllers(namespace)
podClient := clientSet.Core().Pods(namespace)
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
}
return ret, nil
}
// The stress test is not very stressful, because we need to control the running
// time of our pre-submit tests to increase submit-queue throughput. We'll add
// e2e tests that apply more stress.
func TestStressingCascadingDeletion(t *testing.T) {
t.Logf("starts garbage collector stress test")
s, gc, clientSet := setup(t)
defer s.Close()
ns := framework.CreateTestingNamespace("gc-stressing-cascading-deletion", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := make(chan struct{})
go gc.Run(5, stopCh)
defer close(stopCh)
const collections = 10
var wg sync.WaitGroup
wg.Add(collections * 4)
rcUIDs := make(chan types.UID, collections*4)
for i := 0; i < collections; i++ {
// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, nil, &wg, rcUIDs)
// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, getNonOrphanOptions(), &wg, rcUIDs)
// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
}
wg.Wait()
t.Logf("all pods are created, all replications controllers are created then deleted")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
podsInEachCollection := 3
// see the comments on the calls to setupRCsPods for details
remainingGroups := 3
return verifyRemainingObjects(t, clientSet, ns.Name, 0, collections*podsInEachCollection*remainingGroups)
}); err != nil {
t.Fatal(err)
}
t.Logf("number of remaining replication controllers and pods are as expected")
// verify the remaining pods all come from collections 1, 2, and 4, whose pods were orphaned.
podClient := clientSet.Core().Pods(ns.Name)
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
t.Fatal(err)
}
for _, pod := range pods.Items {
if !strings.Contains(pod.ObjectMeta.Name, "collection1-") && !strings.Contains(pod.ObjectMeta.Name, "collection2-") && !strings.Contains(pod.ObjectMeta.Name, "collection4-") {
t.Errorf("got unexpected remaining pod: %#v", pod)
}
}
// verify there are no nodes representing replication controllers in the gc's graph
uids := make([]types.UID, 0, collections)
for i := 0; i < collections; i++ {
uid := <-rcUIDs
uids = append(uids, uid)
}
if gc.GraphHasUID(uids) {
t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
}
metric := &dto.Metric{}
garbagecollector.EventProcessingLatency.Write(metric)
count := float64(metric.Summary.GetSampleCount())
sum := metric.Summary.GetSampleSum()
t.Logf("Average time spent in GC's eventQueue is %.1f microseconds", sum/count)
garbagecollector.DirtyProcessingLatency.Write(metric)
count = float64(metric.Summary.GetSampleCount())
sum = metric.Summary.GetSampleSum()
t.Logf("Average time spent in GC's dirtyQueue is %.1f microseconds", sum/count)
garbagecollector.OrphanProcessingLatency.Write(metric)
count = float64(metric.Summary.GetSampleCount())
sum = metric.Summary.GetSampleSum()
t.Logf("Average time spent in GC's orphanQueue is %.1f microseconds", sum/count)
}
func TestOrphaning(t *testing.T) {
s, gc, clientSet := setup(t)
defer s.Close()
ns := framework.CreateTestingNamespace("gc-orphaning", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
podClient := clientSet.Core().Pods(ns.Name)
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
// create the RC with the orphan finalizer set
toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name)
toBeDeletedRC, err := rcClient.Create(toBeDeletedRC)
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
// these pods should be orphaned.
var podUIDs []types.UID
podsNum := 3
for i := 0; i < podsNum; i++ {
podName := garbageCollectedPodName + strconv.Itoa(i)
pod := newPod(podName, ns.Name, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
podUIDs = append(podUIDs, pod.ObjectMeta.UID)
}
stopCh := make(chan struct{})
go gc.Run(5, stopCh)
defer close(stopCh)
// we need to wait for the gc to observe the creation of the pods; otherwise,
// if the deletion of the RC is observed before the creation of the pods, the
// pods will not be orphaned.
wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
err = rcClient.Delete(toBeDeletedRCName, getOrphanOptions())
if err != nil {
t.Fatalf("Failed to gracefully delete the rc: %v", err)
}
// verify the toBeDeletedRC is deleted
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
return false, err
}
if len(rcs.Items) == 0 {
return true, nil
}
t.Logf("Still has %d RCs", len(rcs.Items))
return false, nil
}); err != nil {
t.Errorf("unexpected error: %v", err)
}
// verify the pods no longer list the deleted RC as an owner
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
if len(pods.Items) != podsNum {
t.Errorf("Expect %d pod(s), but got %#v", podsNum, pods)
}
for _, pod := range pods.Items {
if len(pod.ObjectMeta.OwnerReferences) != 0 {
t.Errorf("pod %s still has non-empty OwnerRefereces: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
}
}
}
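// Illustrative sketch of the owner wiring the tests above depend on, where
// rcClient and podClient stand for the namespace-scoped clients used
// throughout this file.
//
//    rc, _ := rcClient.Create(newOwnerRC("owner", ns.Name))
//    pod := newPod("dependent", ns.Name, []v1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
//    _, _ = podClient.Create(pod)
//    // deleting the owner without orphaning garbage-collects the dependent pod
//    _ = rcClient.Delete(rc.ObjectMeta.Name, getNonOrphanOptions())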

View file

@ -0,0 +1,78 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"testing"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/test/integration/framework"
)
func TestKubectlValidation(t *testing.T) {
testCases := []struct {
data string
// Validation should not fail on missing type information.
err bool
}{
{`{"apiVersion": "v1", "kind": "thisObjectShouldNotExistInAnyGroup"}`, true},
{`{"apiVersion": "invalidVersion", "kind": "Pod"}`, false},
{`{"apiVersion": "v1", "kind": "Pod"}`, false},
// The following cases test the experimental API.
// TODO: Replace with something more robust. These may move.
{`{"apiVersion": "extensions/v1beta1", "kind": "Ingress"}`, false},
{`{"apiVersion": "extensions/v1beta1", "kind": "Job"}`, false},
{`{"apiVersion": "vNotAVersion", "kind": "Job"}`, false},
}
components := framework.NewMasterComponents(&framework.Config{})
defer components.Stop(true, true)
ctx := clientcmdapi.NewContext()
cfg := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.Server = components.ApiServer.URL
cluster.InsecureSkipTLSVerify = true
cfg.Contexts = map[string]*clientcmdapi.Context{"test": ctx}
cfg.CurrentContext = "test"
overrides := clientcmd.ConfigOverrides{
ClusterInfo: *cluster,
}
cmdConfig := clientcmd.NewNonInteractiveClientConfig(*cfg, "test", &overrides, nil)
factory := util.NewFactory(cmdConfig)
schema, err := factory.Validator(true, "")
if err != nil {
t.Errorf("failed to get validator: %v", err)
return
}
for i, test := range testCases {
err := schema.ValidateBytes([]byte(test.data))
if err == nil {
if test.err {
t.Errorf("case %d: expected error", i)
}
} else {
if !test.err {
t.Errorf("case %d: unexpected error: %v", i, err)
}
}
}
}
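// Illustrative sketch: the same schema handle obtained above can validate a
// single manifest directly.
//
//    doc := []byte(`{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "x"}}`)
//    if err := schema.ValidateBytes(doc); err != nil {
//        t.Errorf("expected a valid pod manifest, got: %v", err)
//    }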

View file

@ -0,0 +1,201 @@
// +build benchmark,!no-etcd,!integration
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"flag"
"fmt"
"os"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/integration/framework"
)
// Command line flag globals, parsed in init and used by the benchmarks:
// * pods && !tasks: Start -pods, scale number of parallel tasks with b.N
// * !pods && tasks: Start -tasks, scale pods with b.N
// * pods && tasks: Ignore b.N, benchmark behaves like a test constrained by -benchtime.
// * !pods && !tasks: scale pods and workers with b.N.
// -workers specifies the number of workers to shard tasks across.
// E.g.: go test -bench . -bench-pods 3000 -bench-tasks 100 -bench-workers 10:
// Create 100 tasks each listing 3000 pods, and run them 10 at a time.
var (
Workers int
Pods int
Tasks int
)
const Glog_fatalf = 3
func init() {
q := flag.Int("bench-quiet", 3, "Quietness, don't glog severities <= given level during the benchmark.")
pods := flag.Int("bench-pods", -1, "Number of pods for the benchmark. If unspecified, uses b.N.")
workers := flag.Int("bench-workers", -1, "Number of workers for the benchmark. If unspecified, uses tasks.")
tasks := flag.Int("bench-tasks", -1, "Number of tasks for the benchmark. These tasks are sharded across the workers. If unspecified, uses b.N.")
flag.Parse()
// Unfortunately this v level goes in the opposite direction from stderrthreshold.
flag.Set("v", fmt.Sprintf("%d", *q))
// We need quiet logs to parse benchmark results, which includes Errorf.
flag.Set("logtostderr", "false")
flag.Set("stderrthreshold", fmt.Sprintf("%d", Glog_fatalf-*q))
Pods = *pods
Workers = *workers
Tasks = *tasks
}
// getPods returns the cmd line -pods or b.N if -pods wasn't specified.
// Benchmarks can call getPods to get the number of pods they need to
// create for a given benchmark.
func getPods(bN int) int {
if Pods < 0 {
return bN
}
return Pods
}
// getTasks returns the cmd line -tasks or b.N if -tasks wasn't specified.
// Benchmarks call getTasks to get the number of tasks required to
// perform the benchmark in parallel.
func getTasks(bN int) int {
if Tasks < 0 {
return bN
}
return Tasks
}
// getIterations returns the number of iterations required by each benchmark
// for Go's benchmark framework to produce reliable timing results.
func getIterations(bN int) int {
// Anything with constant pods is only linear if we iterate b.N times.
if Pods > 0 {
return bN
}
return 1
}
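// Worked illustration of the fallbacks above: with -bench-pods 3000 set and
// -bench-tasks unset, getPods(b.N) is pinned to 3000 while getTasks(b.N)
// tracks b.N, so Go's benchmark loop scales the number of listers; and once
// -bench-pods is set, getIterations(b.N) returns b.N, keeping total work
// linear in b.N.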
// startPodsOnNodes creates numPods sharded across numNodes
func startPodsOnNodes(ns string, numPods, numNodes int, restClient clientset.Interface) {
podsPerNode := numPods / numNodes
if podsPerNode < 1 {
podsPerNode = 1
}
framework.RunParallel(func(id int) error {
return framework.StartPods(podsPerNode, fmt.Sprintf("host.%d", id), restClient)
}, numNodes, -1)
}
// Benchmark pod listing by waiting on `Tasks` listers to list `Pods` pods via `Workers`.
func BenchmarkPodList(b *testing.B) {
b.StopTimer()
m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
defer m.Stop(true, true)
ns := framework.CreateTestingNamespace("benchmark-pod-list", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
podsPerNode := numPods / numTasks
if podsPerNode < 1 {
podsPerNode = 1
}
glog.Infof("Starting benchmark: b.N %d, pods %d, workers %d, podsPerNode %d",
b.N, numPods, numTasks, podsPerNode)
startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
// Stop the rc manager so it doesn't steal resources
m.Stop(false, true)
b.StartTimer()
for i := 0; i < iter; i++ {
framework.RunParallel(func(id int) error {
host := fmt.Sprintf("host.%d", id)
now := time.Now()
defer func() {
glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now))
}()
if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
LabelSelector: labels.Everything(),
FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host),
}); err != nil {
return err
} else if len(pods.Items) < podsPerNode {
glog.Fatalf("List retrieved %d pods, which is less than %d", len(pods.Items), podsPerNode)
}
return nil
}, numTasks, Workers)
}
b.StopTimer()
}
// Benchmark pod listing by waiting on `Tasks` listers to list `Pods` pods via `Workers`.
func BenchmarkPodListEtcd(b *testing.B) {
b.StopTimer()
m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
defer m.Stop(true, true)
ns := framework.CreateTestingNamespace("benchmark-pod-list-etcd", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
podsPerNode := numPods / numTasks
if podsPerNode < 1 {
podsPerNode = 1
}
startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
// Stop the rc manager so it doesn't steal resources
m.Stop(false, true)
glog.Infof("Starting benchmark: b.N %d, pods %d, workers %d, podsPerNode %d",
b.N, numPods, numTasks, podsPerNode)
b.StartTimer()
for i := 0; i < iter; i++ {
framework.RunParallel(func(id int) error {
now := time.Now()
defer func() {
glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now))
}()
pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
LabelSelector: labels.Everything(),
FieldSelector: fields.Everything(),
})
if err != nil {
return err
}
if len(pods.Items) < numPods {
glog.Fatalf("List retrieved %d pods, which is less than %d", len(pods.Items), numPods)
}
return nil
}, numTasks, Workers)
}
b.StopTimer()
}
func TestMain(m *testing.M) {
os.Exit(m.Run())
}

File diff suppressed because it is too large

View file

@ -0,0 +1,17 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["doc.go"],
tags = ["automanaged"],
)

View file

@ -0,0 +1,17 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics

View file

@ -0,0 +1,124 @@
// +build integration,!no-etcd,linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"bufio"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration/framework"
"github.com/golang/glog"
"github.com/golang/protobuf/proto"
prometheuspb "github.com/prometheus/client_model/go"
)
const scrapeRequestHeader = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text"
func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) {
req, err := http.NewRequest("GET", s.URL+"/metrics", nil)
if err != nil {
return nil, fmt.Errorf("Unable to create http request: %v", err)
}
// Ask the prometheus exporter for its text protocol buffer format, since it's
// much easier to parse than its plain-text format. Don't use the serialized
// proto representation since it uses a non-standard varint delimiter between
// metric families.
req.Header.Add("Accept", scrapeRequestHeader)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("Unable to contact metrics endpoint of master: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Non-200 response trying to scrape metrics from master: %v", resp)
}
// Each line in the response body should contain all the data for a single metric.
var metrics []*prometheuspb.MetricFamily
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
var metric prometheuspb.MetricFamily
if err := proto.UnmarshalText(scanner.Text(), &metric); err != nil {
return nil, fmt.Errorf("Failed to unmarshal line of metrics response: %v", err)
}
glog.V(4).Infof("Got metric %q", metric.GetName())
metrics = append(metrics, &metric)
}
return metrics, nil
}
func checkForExpectedMetrics(t *testing.T, metrics []*prometheuspb.MetricFamily, expectedMetrics []string) {
foundMetrics := make(map[string]bool)
for _, metric := range metrics {
foundMetrics[metric.GetName()] = true
}
for _, expected := range expectedMetrics {
if _, found := foundMetrics[expected]; !found {
t.Errorf("Master metrics did not include expected metric %q", expected)
}
}
}
func TestMasterProcessMetrics(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
metrics, err := scrapeMetrics(s)
if err != nil {
t.Fatal(err)
}
checkForExpectedMetrics(t, metrics, []string{
"process_start_time_seconds",
"process_cpu_seconds_total",
"go_goroutines",
"process_open_fds",
"process_resident_memory_bytes",
})
}
func TestApiserverMetrics(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
// Make a request to the apiserver to ensure there's at least one data point
// for the metrics we're expecting -- otherwise, they won't be exported.
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil {
t.Fatalf("unexpected error getting pods: %v", err)
}
metrics, err := scrapeMetrics(s)
if err != nil {
t.Fatal(err)
}
checkForExpectedMetrics(t, metrics, []string{
"apiserver_request_count",
"apiserver_request_latencies",
})
}
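// Illustrative sketch: pulling a single counter out of the scraped families,
// using only the client_model types already imported above.
//
//    func counterValue(metrics []*prometheuspb.MetricFamily, name string) (float64, bool) {
//        for _, mf := range metrics {
//            if mf.GetName() == name && len(mf.Metric) > 0 && mf.Metric[0].Counter != nil {
//                return mf.Metric[0].Counter.GetValue(), true
//            }
//        }
//        return 0, false
//    }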

View file

@ -0,0 +1,88 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package objectmeta
import (
"testing"
etcd "github.com/coreos/etcd/client"
"github.com/golang/glog"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/genericapiserver"
etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
"k8s.io/kubernetes/test/integration/framework"
)
// TODO: Eliminate this v2 client dependency.
func newEtcdClient() etcd.Client {
cfg := etcd.Config{
Endpoints: []string{framework.GetEtcdURLFromEnv()},
}
client, err := etcd.New(cfg)
if err != nil {
glog.Fatalf("unable to connect to etcd for testing: %v", err)
}
return client
}
func TestIgnoreClusterName(t *testing.T) {
config := framework.NewMasterConfig()
prefix := config.StorageFactory.(*genericapiserver.DefaultStorageFactory).StorageConfig.Prefix
_, s := framework.RunAMaster(config)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
etcdClient := newEtcdClient()
etcdStorage := etcdstorage.NewEtcdStorage(etcdClient, testapi.Default.Codec(),
prefix+"/namespaces/", false, etcdtest.DeserializationCacheSize)
ctx := context.TODO()
ns := v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test-namespace",
ClusterName: "cluster-name-to-ignore",
},
}
nsNew, err := client.Core().Namespaces().Create(&ns)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsNew.Name)
assert.Empty(t, nsNew.ClusterName)
nsEtcd := v1.Namespace{}
err = etcdStorage.Get(ctx, ns.Name, "", &nsEtcd, false)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsEtcd.Name)
assert.Empty(t, nsEtcd.ClusterName)
nsNew, err = client.Core().Namespaces().Update(&ns)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsNew.Name)
assert.Empty(t, nsNew.ClusterName)
nsEtcd = v1.Namespace{}
err = etcdStorage.Get(ctx, ns.Name, "", &nsEtcd, false)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsEtcd.Name)
assert.Empty(t, nsEtcd.ClusterName)
}

View file

@ -0,0 +1,41 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openshift
import (
"testing"
"k8s.io/kubernetes/pkg/genericapiserver"
"k8s.io/kubernetes/pkg/master"
)
// This test references methods that OpenShift uses to customize the master on
// startup but that are not referenced directly by the master itself.
func TestMasterExportsSymbols(t *testing.T) {
_ = &master.Config{
GenericConfig: &genericapiserver.Config{
EnableSwaggerSupport: false,
EnableMetrics: true,
},
EnableCoreControllers: false,
EnableUISupport: false,
EnableLogsSupport: false,
}
_ = &master.Master{
GenericAPIServer: &genericapiserver.GenericAPIServer{},
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,182 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pods
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
var (
iZero = int64(0)
i30 = int64(30)
i60 = int64(60)
iNeg = int64(-1)
)
prototypePod := func() *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "xxx",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
}
}
cases := []struct {
name string
original *int64
update *int64
valid bool
}{
{
name: "no change, nil",
original: nil,
update: nil,
valid: true,
},
{
name: "no change, set",
original: &i30,
update: &i30,
valid: true,
},
{
name: "change to positive from nil",
original: nil,
update: &i60,
valid: true,
},
{
name: "change to smaller positive",
original: &i60,
update: &i30,
valid: true,
},
{
name: "change to larger positive",
original: &i30,
update: &i60,
valid: false,
},
{
name: "change to negative from positive",
original: &i30,
update: &iNeg,
valid: false,
},
{
name: "change to negative from nil",
original: nil,
update: &iNeg,
valid: false,
},
// zero is not allowed, must be a positive integer
{
name: "change to zero from positive",
original: &i30,
update: &iZero,
valid: false,
},
{
name: "change to nil from positive",
original: &i30,
update: nil,
valid: false,
},
}
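// The table above encodes the update rule for activeDeadlineSeconds: the
// field may be introduced when previously unset and may shrink, but it may
// never grow, be cleared, or be set to zero or a negative value.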
for i, tc := range cases {
pod := prototypePod()
pod.Spec.ActiveDeadlineSeconds = tc.original
pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
pod.Spec.ActiveDeadlineSeconds = tc.update
_, err := client.Core().Pods(ns.Name).Update(pod)
if tc.valid && err != nil {
t.Errorf("%v: failed to update pod: %v", tc.name, err)
} else if !tc.valid && err == nil {
t.Errorf("%v: unexpected allowed update to pod", tc.name)
}
integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
}
func TestPodReadOnlyFilesystem(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
isReadOnly := true
ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "xxx",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &isReadOnly,
},
},
},
},
}
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}

View file

@ -0,0 +1,213 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package quota
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
"k8s.io/kubernetes/test/integration/framework"
)
// 1.2 code gets:
// quota_test.go:95: Took 4.218619579s to scale up without quota
// quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
// 1.3+ code gets:
// quota_test.go:100: Took 4.196205966s to scale up without quota
// quota_test.go:115: Took 12.021640372s to scale up with quota
func TestQuota(t *testing.T) {
// Set up a master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
defer s.Close()
admissionCh := make(chan struct{})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
admission, err := resourcequota.NewResourceQuota(internalClientset, quotainstall.NewRegistry(nil, nil), 5, admissionCh)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer close(admissionCh)
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.AdmissionControl = admission
framework.RunAMasterUsingServer(masterConfig, s, h)
ns := framework.CreateTestingNamespace("quotaed", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
defer framework.DeleteTestingNamespace(ns2, s, t)
controllerCh := make(chan struct{})
defer close(controllerCh)
go replicationcontroller.NewReplicationManagerFromClientForIntegration(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
Run(3, controllerCh)
resourceQuotaRegistry := quotainstall.NewRegistry(clientset, nil)
groupKindsToReplenish := []schema.GroupKind{
api.Kind("Pod"),
}
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
KubeClient: clientset,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: resourceQuotaRegistry,
GroupKindsToReplenish: groupKindsToReplenish,
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(clientset),
}
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)
startTime := time.Now()
scale(t, ns2.Name, clientset)
endTime := time.Now()
t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
quota := &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Name: "quota",
Namespace: ns.Name,
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourcePods: resource.MustParse("1000"),
},
},
}
waitForQuota(t, quota, clientset)
startTime = time.Now()
scale(t, "quotaed", clientset)
endTime = time.Now()
t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
}
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified:
default:
return false, nil
}
switch cast := event.Object.(type) {
case *v1.ResourceQuota:
if len(cast.Status.Hard) > 0 {
return true, nil
}
}
return false, nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
target := int32(100)
rc := &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
Namespace: namespace,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &target,
Selector: map[string]string{"foo": "bar"},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
Image: "busybox",
},
},
},
},
},
}
w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified:
default:
return false, nil
}
switch cast := event.Object.(type) {
case *v1.ReplicationController:
fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
if cast.Status.Replicas == target {
return true, nil
}
}
return false, nil
})
if err != nil {
pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
}
}
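// Note on the pattern above: waitForQuota and scale both open the watch with
// v1.SingleObject before creating the object, so watch.Until cannot miss
// events delivered between Create and Watch. Sketch, with conditionFunc
// standing in for the event predicate:
//
//    w, _ := clientset.Core().ResourceQuotas(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: "quota"}))
//    _, _ = clientset.Core().ResourceQuotas(ns).Create(quota)
//    _, _ = watch.Until(time.Minute, w, conditionFunc)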

View file

@ -0,0 +1,466 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicaset
import (
"fmt"
"net/http/httptest"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
)
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
replicasCopy := int32(replicas)
return &v1beta1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1beta1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: testLabels(),
},
Replicas: &replicasCopy,
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
},
},
}
}
func newMatchingPod(podName, namespace string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
}
}
// verifyRemainingObjects verifies whether the numbers of remaining replica
// sets and pods are rsNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
rsClient := clientSet.Extensions().ReplicaSets(namespace)
podClient := clientSet.Core().Pods(namespace)
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
rss, err := rsClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replica sets: %v", err)
}
if len(rss.Items) != rsNum {
ret = false
t.Logf("expect %d RSs, got %d RSs", rsNum, len(rss.Items))
}
return ret, nil
}
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
rm := replicaset.NewReplicaSetController(
informers.ReplicaSets(),
informers.Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
4096,
enableGarbageCollector,
)
return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
}
// waitToObservePods waits for the podInformer to observe the pods. Call this
// function before running the RS controller to prevent it from creating new
// pods rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) == podNum {
return true, nil
} else {
return false, nil
}
}); err != nil {
t.Fatal(err)
}
}
func TestAdoption(t *testing.T) {
var trueVar = true
testCases := []struct {
name string
existingOwnerReferences func(rs *v1beta1.ReplicaSet) []v1.OwnerReference
expectedOwnerReferences func(rs *v1beta1.ReplicaSet) []v1.OwnerReference
}{
{
"pod refers rs as an owner, not a controller",
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}}
},
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
},
},
{
"pod doesn't have owner references",
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{}
},
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
},
},
{
"pod refers rs as a controller",
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
},
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
},
},
{
"pod refers other rs as the controller, refers the rs as an owner",
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
}
},
func(rs *v1beta1.ReplicaSet) []v1.OwnerReference {
return []v1.OwnerReference{
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
}
},
},
}
for i, tc := range testCases {
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
podClient := clientSet.Core().Pods(ns.Name)
const rsName = "rs"
rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
if err != nil {
t.Fatalf("Failed to create replica set: %v", err)
}
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
pod.OwnerReferences = tc.existingOwnerReferences(rs)
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
stopCh := make(chan struct{})
go rsInformer.Run(stopCh)
go podInformer.Run(stopCh)
waitToObservePods(t, podInformer, 1)
go rm.Run(5, stopCh)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedPod, err := podClient.Get(pod.Name)
if err != nil {
return false, err
}
if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
return true, nil
} else {
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
return false, nil
}
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
}
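// createRSsPods creates the given replica sets and pods, failing the test on any error.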
func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod, ns string) {
rsClient := clientSet.Extensions().ReplicaSets(ns)
podClient := clientSet.Core().Pods(ns)
for _, rs := range rss {
if _, err := rsClient.Create(rs); err != nil {
t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
}
}
for _, pod := range pods {
if _, err := podClient.Create(pod); err != nil {
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
}
}
}
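// waitRSStable waits until the replica set's status reports the desired number of replicas.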
func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
rsClient := clientSet.Extensions().ReplicaSets(ns)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedRS, err := rsClient.Get(rs.Name)
if err != nil {
return false, err
}
return updatedRS.Status.Replicas == *rs.Spec.Replicas, nil
}); err != nil {
t.Fatal(err)
}
}
func TestUpdateSelectorToAdopt(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
// matches pod1 only; change the selector to match pod2 as well. Verify
// there is only one pod left.
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 1)
// let rs's selector only match pod1
rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
rs.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go rsInformer.Run(stopCh)
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)
// change the rs's selector to match both pods
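// In a strategic merge patch, setting a map key to null deletes it, so this
// removes the uniqueKey requirement from the selector.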
patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":null}}}}`
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replica set: %v", err)
}
t.Logf("patched rs = %#v", rs)
// wait for the rs to select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
// matches pod1 and pod2; change the selector to match only pod1. Verify
// that rs creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go rsInformer.Run(stopCh)
go podInformer.Run(stopCh)
waitToObservePods(t, podInformer, 2)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)
// change the rs's selector to match only pod1
patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":"1"}},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replica set: %v", err)
}
t.Logf("patched rs = %#v", rs)
// wait for the rs to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
podClient := clientSet.Core().Pods(ns.Name)
pod2, err = podClient.Get(pod2.Name)
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}
func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
// matches pod1 and pod2; change pod2's labels to non-matching. Verify
// that rs creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod2 := newMatchingPod("pod2", ns.Name)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go rsInformer.Run(stopCh)
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)
// remove the name label from pod2 so it no longer matches the rs's selector
patch := `{"metadata":{"labels":{"name":null}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rs to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
pod2, err = podClient.Get(pod2.Name)
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}
func TestUpdateLabelToBeAdopted(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
// matches pod1 only; change pod2's labels to be matching. Verify the RS
// controller adopts pod2 and deletes one of them, so there is only 1 pod
// left.
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 1)
// let rs's selector match only pod1
rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
rs.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go rsInformer.Run(stopCh)
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)
// change pod2's labels to match the rs's selector
patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rs to select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}


@ -0,0 +1,460 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicationcontroller
import (
"fmt"
"net/http/httptest"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
)
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
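// newRC returns a ReplicationController with the given name, namespace, and
// replica count; its selector and pod template use testLabels().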
func newRC(name, namespace string, replicas int) *v1.ReplicationController {
replicasCopy := int32(replicas)
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Selector: testLabels(),
Replicas: &replicasCopy,
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
},
},
}
}
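// newMatchingPod returns a pod in the Running phase whose labels match
// testLabels(), so controllers created by newRC select it.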
func newMatchingPod(podName, namespace string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
}
}
// verifyRemainingObjects verifies that the numbers of the remaining replication
// controllers and pods are rcNum and podNum, respectively. It returns an error
// if the communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
rcClient := clientSet.Core().ReplicationControllers(namespace)
podClient := clientSet.Core().Pods(namespace)
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
}
return ret, nil
}
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
resyncPeriodFunc := func() time.Duration {
return resyncPeriod
}
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
rm := replication.NewReplicationManager(
podInformer,
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
resyncPeriodFunc,
replication.BurstReplicas,
4096,
enableGarbageCollector,
)
return s, rm, podInformer, clientSet
}
// waitToObservePods waits for the podInformer to observe the pods. Call this
// function before running the RC manager to prevent it from creating new
// pods rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
return len(objects) == podNum, nil
}); err != nil {
t.Fatal(err)
}
}
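// TestAdoption verifies that the replication manager adopts pods that match
// its selector by setting itself as a controller in their owner references.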
func TestAdoption(t *testing.T) {
var trueVar = true
testCases := []struct {
name string
existingOwnerReferences func(rc *v1.ReplicationController) []v1.OwnerReference
expectedOwnerReferences func(rc *v1.ReplicationController) []v1.OwnerReference
}{
{
"pod refers rc as an owner, not a controller",
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
},
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
},
},
{
"pod doesn't have owner references",
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{}
},
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
},
},
{
"pod refers rc as a controller",
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
},
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
},
},
{
"pod refers other rc as the controller, refers the rc as an owner",
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
}
},
func(rc *v1.ReplicationController) []v1.OwnerReference {
return []v1.OwnerReference{
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
}
},
},
}
for i, tc := range testCases {
s, rm, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
podClient := clientSet.Core().Pods(ns.Name)
const rcName = "rc"
rc, err := rcClient.Create(newRC(rcName, ns.Name, 1))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
pod.OwnerReferences = tc.existingOwnerReferences(rc)
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
waitToObservePods(t, podInformer, 1)
go rm.Run(5, stopCh)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedPod, err := podClient.Get(pod.Name)
if err != nil {
return false, err
}
if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
return true, nil
} else {
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
return false, nil
}
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
}
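// createRCsPods creates the given replication controllers and pods, failing the test on any error.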
func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod, ns string) {
rcClient := clientSet.Core().ReplicationControllers(ns)
podClient := clientSet.Core().Pods(ns)
for _, rc := range rcs {
if _, err := rcClient.Create(rc); err != nil {
t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
}
}
for _, pod := range pods {
if _, err := podClient.Create(pod); err != nil {
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
}
}
}
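// waitRCStable waits until the replication controller's status reports the desired number of replicas.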
func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
rcClient := clientSet.Core().ReplicationControllers(ns)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedRC, err := rcClient.Get(rc.Name)
if err != nil {
return false, err
}
return updatedRC.Status.Replicas == *rc.Spec.Replicas, nil
}); err != nil {
t.Fatal(err)
}
}
func TestUpdateSelectorToAdopt(t *testing.T) {
// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
// matches pod1 only; change the selector to match pod2 as well. Verify
// there is only one pod left.
s, rm, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("update-selector-to-adopt", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rc := newRC("rc", ns.Name, 1)
// let rc's selector only match pod1
rc.Spec.Selector["uniqueKey"] = "1"
rc.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRCStable(t, clientSet, rc, ns.Name)
// change the rc's selector to match both pods
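// Setting a key to null in a strategic merge patch deletes it, so this
// removes the uniqueKey requirement from the selector.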
patch := `{"spec":{"selector":{"uniqueKey":null}}}`
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replication controller: %v", err)
}
t.Logf("patched rc = %#v", rc)
// wait for the rc to select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
// matches pod1 and pod2; change the selector to match only pod1. Verify
// that rc creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, rm, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rc := newRC("rc", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
waitToObservePods(t, podInformer, 2)
go rm.Run(5, stopCh)
waitRCStable(t, clientSet, rc, ns.Name)
// change the rc's selector to match only pod1
patch := `{"spec":{"selector":{"uniqueKey":"1"},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replication controller: %v", err)
}
t.Logf("patched rc = %#v", rc)
// wait for the rc to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
podClient := clientSet.Core().Pods(ns.Name)
pod2, err = podClient.Get(pod2.Name)
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}
func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
// matches pod1 and pod2; change pod2's labels to non-matching. Verify
// that rc creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, rm, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("update-label-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rc := newRC("rc", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod2 := newMatchingPod("pod2", ns.Name)
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRCStable(t, clientSet, rc, ns.Name)
// remove the name label from pod2 so it no longer matches the rc's selector
patch := `{"metadata":{"labels":{"name":null}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rc to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
pod2, err = podClient.Get(pod2.Name)
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}
func TestUpdateLabelToBeAdopted(t *testing.T) {
// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
// matches pod1 only; change pod2's labels to be matching. Verify the RC
// controller adopts pod2 and deletes one of them, so there is only 1 pod
// left.
s, rm, podInformer, clientSet := rmSetup(t, true)
ns := framework.CreateTestingNamespace("update-label-to-be-adopted", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rc := newRC("rc", ns.Name, 1)
// let rc's selector match only pod1
rc.Spec.Selector["uniqueKey"] = "1"
rc.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
go rm.Run(5, stopCh)
waitRCStable(t, clientSet, rc, ns.Name)
// change pod2's labels to match the rc's selector
patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rc to select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}


@ -0,0 +1,307 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
// This file tests the scheduler extender.
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
)
const (
filter = "filter"
prioritize = "prioritize"
)
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
type priorityConfig struct {
function priorityFunc
weight int
}
type Extender struct {
name string
predicates []fitPredicate
prioritizers []priorityConfig
}
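// serveHTTP decodes the ExtenderArgs request body and dispatches it to Filter
// or Prioritize, depending on the request URL path.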
func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Request) {
var args schedulerapi.ExtenderArgs
decoder := json.NewDecoder(req.Body)
defer req.Body.Close()
if err := decoder.Decode(&args); err != nil {
http.Error(w, "Decode error", http.StatusBadRequest)
return
}
encoder := json.NewEncoder(w)
if strings.Contains(req.URL.Path, filter) {
resp := &schedulerapi.ExtenderFilterResult{}
nodes, failedNodes, err := e.Filter(&args.Pod, &args.Nodes)
if err != nil {
resp.Error = err.Error()
} else {
resp.Nodes = *nodes
resp.FailedNodes = failedNodes
}
if err := encoder.Encode(resp); err != nil {
t.Fatalf("Failed to encode %+v", resp)
}
} else if strings.Contains(req.URL.Path, prioritize) {
// Prioritize errors are ignored. Default k8s priorities or another extender's
// priorities may be applied.
priorities, _ := e.Prioritize(&args.Pod, &args.Nodes)
if err := encoder.Encode(priorities); err != nil {
t.Fatalf("Failed to encode %+v", priorities)
}
} else {
http.Error(w, "Unknown method", http.StatusNotFound)
}
}
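// Filter runs every registered predicate against each node, returning the
// nodes that fit and a FailedNodesMap explaining the rest.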
func (e *Extender) Filter(pod *v1.Pod, nodes *v1.NodeList) (*v1.NodeList, schedulerapi.FailedNodesMap, error) {
filtered := []v1.Node{}
failedNodesMap := schedulerapi.FailedNodesMap{}
for _, node := range nodes.Items {
fits := true
for _, predicate := range e.predicates {
fit, err := predicate(pod, &node)
if err != nil {
return &v1.NodeList{}, schedulerapi.FailedNodesMap{}, err
}
if !fit {
fits = false
break
}
}
if fits {
filtered = append(filtered, node)
} else {
failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
}
}
return &v1.NodeList{Items: filtered}, failedNodesMap, nil
}
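// Prioritize runs each priority function with a nonzero weight and sums the
// weighted scores per host.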
func (e *Extender) Prioritize(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
result := schedulerapi.HostPriorityList{}
combinedScores := map[string]int{}
for _, prioritizer := range e.prioritizers {
weight := prioritizer.weight
if weight == 0 {
continue
}
priorityFunc := prioritizer.function
prioritizedList, err := priorityFunc(pod, nodes)
if err != nil {
return &schedulerapi.HostPriorityList{}, err
}
for _, hostEntry := range *prioritizedList {
combinedScores[hostEntry.Host] += hostEntry.Score * weight
}
}
for host, score := range combinedScores {
result = append(result, schedulerapi.HostPriority{Host: host, Score: score})
}
return &result, nil
}
func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
return true, nil
}
return false, nil
}
func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
return true, nil
}
return false, nil
}
func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
result := schedulerapi.HostPriorityList{}
for _, node := range nodes.Items {
score := 1
if node.Name == "machine2" {
score = 10
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
}
return &result, nil
}
func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
result := schedulerapi.HostPriorityList{}
for _, node := range nodes.Items {
score := 1
if node.Name == "machine3" {
score = 10
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
}
return &result, nil
}
func TestSchedulerExtender(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
extender1 := &Extender{
name: "extender1",
predicates: []fitPredicate{machine_1_2_3_Predicate},
prioritizers: []priorityConfig{{machine_2_Prioritizer, 1}},
}
es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
extender1.serveHTTP(t, w, req)
}))
defer es1.Close()
extender2 := &Extender{
name: "extender2",
predicates: []fitPredicate{machine_2_3_5_Predicate},
prioritizers: []priorityConfig{{machine_3_Prioritizer, 1}},
}
es2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
extender2.serveHTTP(t, w, req)
}))
defer es2.Close()
policy := schedulerapi.Policy{
ExtenderConfigs: []schedulerapi.ExtenderConfig{
{
URLPrefix: es1.URL,
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 3,
EnableHttps: false,
},
{
URLPrefix: es2.URL,
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 4,
EnableHttps: false,
},
},
}
policy.APIVersion = registered.GroupOrDie(v1.GroupName).GroupVersion.String()
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
scheduler.New(schedulerConfig).Run()
defer close(schedulerConfig.StopEverything)
DoTestPodScheduling(ns, t, clientSet)
}
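// DoTestPodScheduling creates five nodes and verifies that a pod filtered and
// prioritized by both extenders is scheduled onto machine3.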
func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "schedulable condition",
LastHeartbeatTime: metav1.Time{Time: time.Now()},
}
node := &v1.Node{
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
},
Conditions: []v1.NodeCondition{goodCondition},
},
}
for ii := 0; ii < 5; ii++ {
node.Name = fmt.Sprintf("machine%d", ii+1)
if _, err := cs.Core().Nodes().Create(node); err != nil {
t.Fatalf("Failed to create nodes: %v", err)
}
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
},
}
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Fatalf("Failed to schedule pod: %v", err)
}
if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name); err != nil {
t.Fatalf("Failed to get pod: %v", err)
} else if myPod.Spec.NodeName != "machine3" {
t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)
}
t.Logf("Scheduled pod using extenders")
}

File diff suppressed because it is too large


@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["util.go"],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"scheduler_bench_test.go",
"scheduler_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/renstrom/dedent",
],
)


@ -0,0 +1,45 @@
Scheduler Performance Test
======
Motivation
------
We already have a performance testing system -- Kubemark. However, Kubemark requires setting up and bootstrapping a whole cluster, which takes a lot of time.
We want a standard way to reproduce scheduling latency metrics and to benchmark the scheduler as simply and quickly as possible. We have the following goals:
- Save time on testing
- The test and benchmark can be run in a single box.
We only set up the components necessary for scheduling, without booting up a full cluster.
- Profiling runtime metrics to find bottlenecks
- Write a scheduler integration test focused on performance measurement.
Take advantage of Go profiling tools and collect fine-grained metrics,
like cpu-profiling, memory-profiling and block-profiling.
- Reproduce test result easily
- We want a known place to run performance-related tests for the scheduler.
Developers should just run one script to collect all the information they need.
Currently the test suite has the following:
- density test (by adding a new Go test)
- schedule 30k pods on 1000 (fake) nodes and 3k pods on 100 (fake) nodes
- print out scheduling rate every second
- show how the scheduling rate changes as the number of scheduled pods grows
- benchmark
- make use of `go test -bench` and report nanoseconds/op.
- schedule b.N pods when the cluster has N nodes and P scheduled pods. Since it takes a relatively long time to finish one round, b.N is small: 10 - 100. A sketch of adding a new case follows this list.
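For illustration, a new benchmark case would follow the pattern already used in `scheduler_bench_test.go`; the node/pod counts below are hypothetical:
```
// BenchmarkScheduling500Nodes1000Pods would benchmark the scheduling rate
// when the cluster has 500 nodes and 1000 scheduled pods.
// Hypothetical case; it reuses this package's benchmarkScheduling helper.
func BenchmarkScheduling500Nodes1000Pods(b *testing.B) {
	benchmarkScheduling(500, 1000, b)
}
```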
How To Run
------
```
cd kubernetes/test/component/scheduler/perf
./test-performance.sh
```
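Based on the `test-performance.sh` script in this directory, the benchmark suite appears to be gated behind an environment variable, so `RUN_BENCHMARK=true ./test-performance.sh` should run the benchmarks in addition to the density tests.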
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/component/scheduler/perf/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"testing"
"time"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 0 scheduled pods
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
benchmarkScheduling(100, 0, b)
}
// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 1000 scheduled pods
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
benchmarkScheduling(100, 1000, b)
}
// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 0 scheduled pods
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
benchmarkScheduling(1000, 0, b)
}
// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 1000 scheduled pods
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
benchmarkScheduling(1000, 1000, b)
}
// benchmarkScheduling benchmarks scheduling rate with specific number of nodes
// and specific number of pods already scheduled. Since an operation takes relatively
// long time, b.N should be small: 10 - 100.
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
schedulerConfigFactory, finalFunc := mustSetupScheduler()
defer finalFunc()
c := schedulerConfigFactory.Client
nodePreparer := framework.NewIntegrationTestNodePreparer(
c,
[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
"scheduler-perf-",
)
if err := nodePreparer.PrepareNodes(); err != nil {
glog.Fatalf("%v", err)
}
defer nodePreparer.CleanupNodes()
config := testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
podCreator := testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()
for {
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= numScheduledPods {
break
}
time.Sleep(1 * time.Second)
}
// start benchmark
b.ResetTimer()
config = testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
podCreator = testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= numScheduledPods+b.N {
break
}
// Note: This might introduce slight deviation in accuracy of benchmark results.
// Since the total amount of time is relatively large, it might not be a concern.
time.Sleep(100 * time.Millisecond)
}
}


@ -0,0 +1,219 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"fmt"
"math"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
"github.com/renstrom/dedent"
)
const (
threshold3K = 100
threshold30K = 30
threshold60K = 30
)
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
func TestSchedule100Node3KPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
config := defaultSchedulerBenchmarkConfig(100, 3000)
if min := schedulePods(config); min < threshold3K {
t.Errorf("To small pod scheduling throughput for 3k pods. Expected %v got %v", threshold3K, min)
} else {
fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
}
}
// TestSchedule100Node3KNodeAffinityPods schedules 3k pods using node affinity on 100 nodes.
func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
config := baseConfig()
config.numNodes = 100
config.numPods = 3000
// number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
numGroups := 10
nodeAffinityKey := "kubernetes.io/sched-perf-node-affinity"
nodeStrategies := make([]testutils.CountToStrategy, 0, 10)
for i := 0; i < numGroups; i++ {
nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{
Count: config.numNodes / numGroups,
Strategy: testutils.NewLabelNodePrepareStrategy(nodeAffinityKey, fmt.Sprintf("%v", i)),
})
}
config.nodePreparer = framework.NewIntegrationTestNodePreparer(
config.schedulerConfigFactory.Client,
nodeStrategies,
"scheduler-perf-",
)
affinityTemplate := dedent.Dedent(`
{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "` + nodeAffinityKey + `",
"operator": "In",
"values": ["%v"]
}]
}]
}
}
}`)
podCreatorConfig := testutils.NewTestPodCreatorConfig()
for i := 0; i < numGroups; i++ {
podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
testutils.NewCustomCreatePodStrategy(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
GenerateName: "sched-perf-node-affinity-pod-",
Annotations: map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
},
Spec: testutils.MakePodSpec(),
}),
)
}
config.podCreator = testutils.NewTestPodCreator(config.schedulerConfigFactory.Client, podCreatorConfig)
if min := schedulePods(config); min < threshold30K {
t.Errorf("To small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
} else {
fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
}
}
// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
func TestSchedule1000Node30KPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
config := defaultSchedulerBenchmarkConfig(1000, 30000)
if min := schedulePods(config); min < threshold30K {
t.Errorf("To small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
} else {
fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
}
}
// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
// This test won't fit in the normal 10-minute time window.
// func TestSchedule2000Node60KPods(t *testing.T) {
// if testing.Short() {
// t.Skip("Skipping because we want to run short tests")
// }
// config := defaultSchedulerBenchmarkConfig(2000, 60000)
// if min := schedulePods(config); min < threshold60K {
// t.Errorf("To small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
// } else {
// fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
// }
// }
type testConfig struct {
numPods int
numNodes int
nodePreparer testutils.TestNodePreparer
podCreator *testutils.TestPodCreator
schedulerConfigFactory *factory.ConfigFactory
destroyFunc func()
}
func baseConfig() *testConfig {
schedulerConfigFactory, destroyFunc := mustSetupScheduler()
return &testConfig{
schedulerConfigFactory: schedulerConfigFactory,
destroyFunc: destroyFunc,
}
}
func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
baseConfig := baseConfig()
nodePreparer := framework.NewIntegrationTestNodePreparer(
baseConfig.schedulerConfigFactory.Client,
[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
"scheduler-perf-",
)
config := testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", numPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
podCreator := testutils.NewTestPodCreator(baseConfig.schedulerConfigFactory.Client, config)
baseConfig.nodePreparer = nodePreparer
baseConfig.podCreator = podCreator
baseConfig.numPods = numPods
baseConfig.numNodes = numNodes
return baseConfig
}
// schedulePods schedules a specific number of pods on a specific number of nodes.
// It is used to learn the scheduling throughput on various cluster sizes and how
// it changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
// It returns the minimum throughput observed over the whole run.
func schedulePods(config *testConfig) int32 {
defer config.destroyFunc()
if err := config.nodePreparer.PrepareNodes(); err != nil {
glog.Fatalf("%v", err)
}
defer config.nodePreparer.CleanupNodes()
config.podCreator.CreatePods()
prev := 0
minQps := int32(math.MaxInt32)
start := time.Now()
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= config.numPods {
fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average).\n",
config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second))
return minQps
}
// There's no point in printing it for the last iteration, as the value is random
qps := len(scheduled) - prev
if int32(qps) < minQps {
minQps = int32(qps)
}
fmt.Printf("%ds\trate: %d\ttotal: %d\n", time.Since(start)/time.Second, qps, len(scheduled))
prev = len(scheduled)
time.Sleep(1 * time.Second)
}
}


@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd ${DIR_BASENAME}
cleanup() {
popd 2> /dev/null
kube::etcd::cleanup
kube::log::status "performance test cleanup complete"
}
trap cleanup EXIT
kube::etcd::start
# We use the benchmark suite for profiling, because it only runs a few pods and
# theoretically has less variance.
if ${RUN_BENCHMARK:-false}; then
kube::log::status "performance test (benchmark) compiling"
go test -c -o "perf.test"
kube::log::status "performance test (benchmark) start"
"./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
kube::log::status "...benchmark tests finished"
fi
# Running density tests. It might take a long time.
kube::log::status "performance test (density) start"
go test -test.run=. -test.timeout=60m -test.short=false
kube::log::status "...density tests finished"


@ -0,0 +1,78 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"net/http"
"net/http/httptest"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)
// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns the scheduler config factory and a destroyFunc which should be used
// to clean up resources when finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 5000.0,
Burst: 5000,
})
schedulerConfigFactory = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
panic("Couldn't create scheduler config")
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
scheduler.New(schedulerConfig).Run()
destroyFunc = func() {
glog.Infof("destroying")
close(schedulerConfig.StopEverything)
s.Close()
glog.Infof("destroyed")
}
return
}


@ -0,0 +1,122 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secrets
// This file tests use of the secrets API resource.
import (
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
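// deleteSecretOrErrorf deletes the named secret and reports a test error on failure.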
func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Core().Secrets(ns).Delete(name, nil); err != nil {
t.Errorf("unable to delete secret %v: %v", name, err)
}
}
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("secret", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
DoTestSecrets(t, client, ns)
}
// DoTestSecrets tests secrets for one API version.
func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
// Make a secret object.
s := v1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: "secret",
Namespace: ns.Name,
},
Data: map[string][]byte{
"data": []byte("value1\n"),
},
}
if _, err := client.Core().Secrets(s.Namespace).Create(&s); err != nil {
t.Errorf("unable to create test secret: %v", err)
}
defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)
// Template for pods that use a secret.
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "XXX",
Namespace: ns.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "secvol",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "secret",
},
},
},
},
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
VolumeMounts: []v1.VolumeMount{
{
Name: "secvol",
MountPath: "/fake/path",
ReadOnly: true,
},
},
},
},
},
}
// Create a pod to consume secret.
pod.ObjectMeta.Name = "uses-secret"
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
// Create a pod that consumes non-existent secret.
pod.ObjectMeta.Name = "uses-non-existent-secret"
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
// This pod may fail to run, but we don't currently prevent this, and this
// test can't check whether the kubelet actually pulls the secret.
// Verifying contents of the volumes is out of scope for an
// apiserver<->kubelet integration test. It is covered by an e2e test.
}

File diff suppressed because it is too large


@ -0,0 +1,102 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageclasses
// This file contains tests for the storage classes API resource.
import (
"testing"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration/framework"
)
const provisionerPluginName = "kubernetes.io/mock-provisioner"
// TestStorageClasses tests apiserver-side behavior of creation of storage class objects and their use by pvcs.
func TestStorageClasses(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("storageclass", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
DoTestStorageClasses(t, client, ns)
}
// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
// Make a storage class object.
s := storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "gold",
},
Provisioner: provisionerPluginName,
}
if _, err := client.Storage().StorageClasses().Create(&s); err != nil {
t.Errorf("unable to create test storage class: %v", err)
}
defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)
// Template for pvcs that specify a storage class
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "XXX",
Namespace: ns.Name,
Annotations: map[string]string{
storageutil.StorageClassAnnotation: "gold",
},
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
},
}
pvc.ObjectMeta.Name = "uses-storageclass"
if _, err := client.Core().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
t.Errorf("Failed to create pvc: %v", err)
}
defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name)
}
func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Storage().StorageClasses().Delete(name, nil); err != nil {
t.Errorf("unable to delete storage class %v: %v", name, err)
}
}
func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil); err != nil {
t.Errorf("unable to delete persistent volume claim %v: %v", name, err)
}
}
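
The test above only verifies that the annotated PVC is accepted by the apiserver; no volume controller runs in this integration setup, so the claim is not expected to bind. A minimal sketch, assuming a running provisioner, of how a test might instead poll until the claim binds — the helper name waitForClaimBound is hypothetical and not part of the vendored file:

package storageclasses

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForClaimBound polls the claim until its phase is Bound or the timeout expires.
func waitForClaimBound(c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		claim, err := c.Core().PersistentVolumeClaims(ns).Get(name)
		if err != nil {
			return false, err
		}
		// Bound means a persistent volume has been provisioned for the claim.
		return claim.Status.Phase == v1.ClaimBound, nil
	})
}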

View file

@ -0,0 +1,231 @@
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package thirdparty
// This file contains tests for the ThirdPartyResource API.
import (
"encoding/json"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
)
// TODO: these tests should eventually be runnable as a single test.
func TestThirdPartyDelete(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
client := clientset.NewForConfigOrDie(clientConfig)
DoTestInstallThirdPartyAPIDelete(t, client, clientConfig)
}
func TestThirdPartyMultiple(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
client := clientset.NewForConfigOrDie(clientConfig)
DoTestInstallMultipleAPIs(t, client, clientConfig)
}
// TODO: make multiple versions work; they've been broken.
var versionsToTest = []string{"v1"}
type Foo struct {
metav1.TypeMeta `json:",inline"`
v1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
SomeField string `json:"someField"`
OtherField int `json:"otherField"`
}
type FooList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"`
Items []Foo `json:"items"`
}
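// For illustration only (hypothetical, not part of the vendored file): with
// the inline TypeMeta and embedded ObjectMeta above, the expectedObj created
// below marshals to JSON roughly like
//
//	{
//	  "kind": "Foo",
//	  "metadata": {"name": "test", "namespace": "default"},
//	  "someField": "test field",
//	  "otherField": 10
//	}
//
// which is the wire form the raw REST calls in this file POST and GET.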
// installThirdParty installs a third party resource and returns a cleanup func intended to be deferred.
func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
var err error
_, err = client.Extensions().ThirdPartyResources().Create(tpr)
if err != nil {
t.Fatal(err)
}
fooClientConfig := *clientConfig
fooClientConfig.APIPath = "apis"
fooClientConfig.GroupVersion = &schema.GroupVersion{Group: group, Version: version}
fooClient, err := restclient.RESTClientFor(&fooClientConfig)
if err != nil {
t.Fatal(err)
}
err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
_, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
if err == nil {
return true, nil
}
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
})
if err != nil {
t.Fatal(err)
}
return func() {
client.Extensions().ThirdPartyResources().Delete(tpr.Name, nil)
err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
_, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
t.Fatal(err)
}
}
}
func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
group := "company.com"
version := "v1"
defer installThirdParty(t, client, clientConfig,
&extensions.ThirdPartyResource{
ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
Versions: []extensions.APIVersion{{Name: version}},
}, group, version, "foos",
)()
// TODO make multiple resources in one version work
// defer installThirdParty(t, client, clientConfig,
// &extensions.ThirdPartyResource{
// ObjectMeta: v1.ObjectMeta{Name: "bar.company.com"},
// Versions: []extensions.APIVersion{{Name: version}},
// }, group, version, "bars",
// )()
}
func DoTestInstallThirdPartyAPIDelete(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
for _, version := range versionsToTest {
testInstallThirdPartyAPIDeleteVersion(t, client, clientConfig, version)
}
}
func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, version string) {
group := "company.com"
defer installThirdParty(t, client, clientConfig,
&extensions.ThirdPartyResource{
ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
Versions: []extensions.APIVersion{{Name: version}},
}, group, version, "foos",
)()
fooClientConfig := *clientConfig
fooClientConfig.APIPath = "apis"
fooClientConfig.GroupVersion = &schema.GroupVersion{Group: group, Version: version}
fooClient, err := restclient.RESTClientFor(&fooClientConfig)
if err != nil {
t.Fatal(err)
}
expectedObj := Foo{
ObjectMeta: v1.ObjectMeta{
Name: "test",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: "Foo",
},
SomeField: "test field",
OtherField: 10,
}
objBytes, err := json.Marshal(&expectedObj)
if err != nil {
t.Fatal(err)
}
if _, err := fooClient.Post().Namespace("default").Resource("foos").Body(objBytes).DoRaw(); err != nil {
t.Fatal(err)
}
apiBytes, err := fooClient.Get().Namespace("default").Resource("foos").Name("test").DoRaw()
if err != nil {
t.Fatal(err)
}
item := Foo{}
err = json.Unmarshal(apiBytes, &item)
if err != nil {
t.Fatal(err)
}
// Fill in fields set by the apiserver
item.SelfLink = expectedObj.SelfLink
item.ResourceVersion = expectedObj.ResourceVersion
item.Namespace = expectedObj.Namespace
item.UID = expectedObj.UID
item.CreationTimestamp = expectedObj.CreationTimestamp
if !reflect.DeepEqual(item, expectedObj) {
t.Fatalf("expected:\n%v\n", diff.ObjectGoPrintSideBySide(expectedObj, item))
}
listBytes, err := fooClient.Get().Namespace("default").Resource("foos").DoRaw()
if err != nil {
t.Fatal(err)
}
list := FooList{}
err = json.Unmarshal(listBytes, &list)
if err != nil {
t.Fatal(err)
}
if len(list.Items) != 1 {
t.Fatalf("wrong item: %v", list)
}
if _, err := fooClient.Delete().Namespace("default").Resource("foos").Name("test").DoRaw(); err != nil {
t.Fatal(err)
}
if _, err := fooClient.Get().Namespace("default").Resource("foos").Name("test").DoRaw(); !apierrors.IsNotFound(err) {
t.Fatal(err)
}
}
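
For reference, the fooClient above resolves Foo requests onto the standard /apis path layout derived from the ThirdPartyResource name. A hypothetical sketch, not part of the vendored file, showing the URL such a GET builds:

package thirdparty

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/restclient"
)

// printFooURL prints the request URL for the Foo named "test", e.g.
// https://<master>/apis/company.com/v1/namespaces/default/foos/test
func printFooURL(fooClient *restclient.RESTClient) {
	req := fooClient.Get().Namespace("default").Resource("foos").Name("test")
	fmt.Println(req.URL())
}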

62
vendor/k8s.io/kubernetes/test/integration/utils.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/util/wait"
)
func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Core().Pods(ns).Delete(name, nil); err != nil {
t.Errorf("unable to delete pod %v: %v", name, err)
}
}
// Expected status code sets for test requests. Whether a given request
// succeeds or is forbidden depends on the authentication and authorization
// setup of the master.
var Code200 = map[int]bool{200: true}
var Code201 = map[int]bool{201: true}
var Code400 = map[int]bool{400: true}
var Code403 = map[int]bool{403: true}
var Code404 = map[int]bool{404: true}
var Code405 = map[int]bool{405: true}
var Code409 = map[int]bool{409: true}
var Code422 = map[int]bool{422: true}
var Code500 = map[int]bool{500: true}
var Code503 = map[int]bool{503: true}
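// A hypothetical usage sketch (not part of this file): callers compare a
// response status against one of these sets, roughly like
//
//	resp, err := http.DefaultClient.Do(req)
//	if err != nil {
//		t.Fatalf("request failed: %v", err)
//	}
//	if !Code403[resp.StatusCode] {
//		t.Errorf("expected status 403, got %d", resp.StatusCode)
//	}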
// WaitForPodToDisappear polls the API server until the pod has been deleted or the timeout expires.
func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := podClient.Get(podName)
if err == nil {
// The pod still exists; keep polling.
return false, nil
}
if errors.IsNotFound(err) {
// The pod is gone.
return true, nil
}
return false, err
})
}
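
A minimal usage sketch of the helpers above, assuming a running master, a clientset client, and an existing pod named "demo" in namespace ns; all names here are hypothetical:

package integration_test

import (
	"testing"
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/test/integration"
)

// deleteAndWait deletes the pod and blocks until the apiserver reports it gone.
func deleteAndWait(t *testing.T, client clientset.Interface, ns string) {
	integration.DeletePodOrErrorf(t, client, ns, "demo")
	podClient := client.Core().Pods(ns)
	if err := integration.WaitForPodToDisappear(podClient, "demo", time.Second, 30*time.Second); err != nil {
		t.Fatalf("pod did not disappear: %v", err)
	}
}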