forked from barak/tarpoon

Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

19 vendor/k8s.io/kubernetes/examples/BUILD generated vendored Normal file

@@ -0,0 +1,19 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "config",
srcs = glob([
"**/*.yaml",
"**/*.yml",
"**/*.json",
]) + [
"pod",
],
)
filegroup(
name = "sources",
srcs = glob([
"**/*",
]),
)

5 vendor/k8s.io/kubernetes/examples/OWNERS generated vendored Normal file

@@ -0,0 +1,5 @@
assignees:
- bgrant0607
- brendandburns
- thockin
- zmerlynn

29 vendor/k8s.io/kubernetes/examples/README.md generated vendored Normal file

@@ -0,0 +1,29 @@
# Kubernetes Examples: releases.k8s.io/HEAD
This directory contains a number of examples of how to run
real applications with Kubernetes.
Demonstrations of how to use specific Kubernetes features can be found in our [documents](../docs/).
### Maintained Examples
Maintained Examples are expected to be updated with every Kubernetes
release, to use the latest and greatest features, current guidelines
and best practices, and to refresh command syntax, output, changed
prerequisites, as needed.
Name | Description | Notable Features Used | Complexity Level
------------- | ------------- | ------------ | ------------
[Guestbook](guestbook/) | PHP app with Redis | Replication Controller, Service | Beginner
[WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner
[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate
Note: Please add only maintained examples to the list above.
See [Example Guidelines](guidelines.md) for a description of what goes
in this directory, and what examples should contain.
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

22 vendor/k8s.io/kubernetes/examples/apiserver/README.md generated vendored Normal file

@@ -0,0 +1,22 @@
# API Server
This is a work-in-progress example of an API server.
We are working on isolating the generic API server code from Kubernetes-specific
API objects. Some relevant issues:
* https://github.com/kubernetes/kubernetes/issues/17412
* https://github.com/kubernetes/kubernetes/issues/2742
* https://github.com/kubernetes/kubernetes/issues/13541
This code exemplifies what it takes to write your own API server.
To start this example API server, run:
```
$ go run examples/apiserver/server/main.go
```
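Once it is running, you can poke at it from another terminal. This is a minimal sketch, assuming the server is listening on the insecure port `8081` configured in this example and that it exposes the usual discovery and health endpoints of a generic API server:

```sh
# List the API groups served by the example server (insecure local port assumed).
curl http://localhost:8081/apis
# Basic health check endpoint, if exposed.
curl http://localhost:8081/healthz
```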
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/apiserver/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,142 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"fmt"
"net"
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1"
testgroupetcd "k8s.io/kubernetes/examples/apiserver/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/rest"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/genericapiserver"
"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"
genericvalidation "k8s.io/kubernetes/pkg/genericapiserver/validation"
"k8s.io/kubernetes/pkg/registry/generic"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/storage/storagebackend"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
// Install the testgroup API
_ "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/install"
)
const (
// Ports on which to run the server.
// Explicitly setting these to a different value than the default values, to prevent this from clashing with a local cluster.
InsecurePort = 8081
SecurePort = 6444
)
func newStorageFactory() genericapiserver.StorageFactory {
config := storagebackend.Config{
Prefix: genericoptions.DefaultEtcdPathPrefix,
ServerList: []string{"http://127.0.0.1:2379"},
}
storageFactory := genericapiserver.NewDefaultStorageFactory(config, "application/json", api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), genericapiserver.NewResourceConfig())
return storageFactory
}
type ServerRunOptions struct {
GenericServerRunOptions *genericoptions.ServerRunOptions
Etcd *genericoptions.EtcdOptions
SecureServing *genericoptions.SecureServingOptions
InsecureServing *genericoptions.ServingOptions
Authentication *genericoptions.BuiltInAuthenticationOptions
}
func NewServerRunOptions() *ServerRunOptions {
s := ServerRunOptions{
GenericServerRunOptions: genericoptions.NewServerRunOptions(),
Etcd: genericoptions.NewEtcdOptions(),
SecureServing: genericoptions.NewSecureServingOptions(),
InsecureServing: genericoptions.NewInsecureServingOptions(),
Authentication: genericoptions.NewBuiltInAuthenticationOptions().WithAll(),
}
s.InsecureServing.BindPort = InsecurePort
s.SecureServing.ServingOptions.BindPort = SecurePort
return &s
}
func (serverOptions *ServerRunOptions) Run(stopCh <-chan struct{}) error {
// Set ServiceClusterIPRange
_, serviceClusterIPRange, _ := net.ParseCIDR("10.0.0.0/24")
serverOptions.GenericServerRunOptions.ServiceClusterIPRange = *serviceClusterIPRange
serverOptions.Etcd.StorageConfig.ServerList = []string{"http://127.0.0.1:2379"}
genericvalidation.ValidateRunOptions(serverOptions.GenericServerRunOptions)
if errs := serverOptions.Etcd.Validate(); len(errs) > 0 {
return utilerrors.NewAggregate(errs)
}
if errs := serverOptions.SecureServing.Validate(); len(errs) > 0 {
return utilerrors.NewAggregate(errs)
}
if errs := serverOptions.InsecureServing.Validate("insecure-port"); len(errs) > 0 {
return utilerrors.NewAggregate(errs)
}
config := genericapiserver.NewConfig().
ApplyOptions(serverOptions.GenericServerRunOptions).
ApplySecureServingOptions(serverOptions.SecureServing).
ApplyInsecureServingOptions(serverOptions.InsecureServing).
ApplyAuthenticationOptions(serverOptions.Authentication).
Complete()
if err := config.MaybeGenerateServingCerts(); err != nil {
// this wasn't treated as fatal for this process before
fmt.Printf("Error creating cert: %v", err)
}
config.Authorizer = authorizer.NewAlwaysAllowAuthorizer()
s, err := config.New()
if err != nil {
return fmt.Errorf("Error in bringing up the server: %v", err)
}
groupVersion := v1.SchemeGroupVersion
groupName := groupVersion.Group
groupMeta, err := registered.Group(groupName)
if err != nil {
return fmt.Errorf("%v", err)
}
storageFactory := newStorageFactory()
storageConfig, err := storageFactory.NewConfig(schema.GroupResource{Group: groupName, Resource: "testtype"})
if err != nil {
return fmt.Errorf("Unable to get storage config: %v", err)
}
restStorageMap := map[string]rest.Storage{
"testtypes": testgroupetcd.NewREST(storageConfig, generic.UndecoratedStorage),
}
apiGroupInfo := genericapiserver.APIGroupInfo{
GroupMeta: *groupMeta,
VersionedResourcesStorageMap: map[string]map[string]rest.Storage{
groupVersion.Version: restStorageMap,
},
Scheme: api.Scheme,
NegotiatedSerializer: api.Codecs,
}
if err := s.InstallAPIGroup(&apiGroupInfo); err != nil {
return fmt.Errorf("Error in installing API: %v", err)
}
s.PrepareRun().Run(stopCh)
return nil
}

@@ -0,0 +1,94 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"fmt"
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/generic"
genericregistry "k8s.io/kubernetes/pkg/registry/generic/registry"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/storage"
"k8s.io/kubernetes/pkg/storage/storagebackend"
)
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work with testtype.
func NewREST(config *storagebackend.Config, storageDecorator generic.StorageDecorator) *REST {
prefix := "/testtype"
newListFunc := func() runtime.Object { return &testgroup.TestTypeList{} }
// Usually you should reuse your RESTCreateStrategy.
strategy := &NotNamespaceScoped{}
getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) {
testObj, ok := obj.(*testgroup.TestType)
if !ok {
return nil, nil, fmt.Errorf("not a TestType")
}
return labels.Set(testObj.Labels), nil, nil
}
storageInterface, _ := storageDecorator(
config, 100, &testgroup.TestType{}, prefix, strategy, newListFunc, getAttrs, storage.NoTriggerPublisher)
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &testgroup.TestType{} },
// NewListFunc returns an object capable of storing results of an etcd list.
NewListFunc: newListFunc,
// Produces a path that etcd understands, to the root of the resource
// by combining the namespace in the context with the given prefix.
KeyRootFunc: func(ctx api.Context) string {
return genericregistry.NamespaceKeyRootFunc(ctx, prefix)
},
// Produces a path that etcd understands, to the resource by combining
// the namespace in the context with the given prefix.
KeyFunc: func(ctx api.Context, name string) (string, error) {
return genericregistry.NamespaceKeyFunc(ctx, prefix, name)
},
// Retrieve the name field of the resource.
ObjectNameFunc: func(obj runtime.Object) (string, error) {
return obj.(*testgroup.TestType).Name, nil
},
// Used to match objects based on labels/fields for list.
PredicateFunc: func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
testType, ok := obj.(*testgroup.TestType)
if !ok {
return nil, nil, fmt.Errorf("unexpected type of given object")
}
return labels.Set(testType.ObjectMeta.Labels), fields.Set{}, nil
},
}
},
Storage: storageInterface,
}
return &REST{store}
}
type NotNamespaceScoped struct {
}
func (*NotNamespaceScoped) NamespaceScoped() bool {
return false
}

@@ -0,0 +1,43 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"k8s.io/kubernetes/examples/apiserver"
"k8s.io/kubernetes/pkg/util/flag"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
"github.com/spf13/pflag"
)
func main() {
serverRunOptions := apiserver.NewServerRunOptions()
// Parse command line flags.
serverRunOptions.AddUniversalFlags(pflag.CommandLine)
serverRunOptions.Etcd.AddFlags(pflag.CommandLine)
serverRunOptions.SecureServing.AddFlags(pflag.CommandLine)
serverRunOptions.SecureServing.AddDeprecatedFlags(pflag.CommandLine)
serverRunOptions.InsecureServing.AddFlags(pflag.CommandLine)
serverRunOptions.InsecureServing.AddDeprecatedFlags(pflag.CommandLine)
flag.InitFlags()
if err := serverRunOptions.Run(wait.NeverStop); err != nil {
glog.Fatalf("Error in bringing up the server: %v", err)
}
}

182 vendor/k8s.io/kubernetes/examples/cluster-dns/README.md generated vendored Normal file

@@ -0,0 +1,182 @@
## Kubernetes DNS example
This is a toy example demonstrating how to use Kubernetes DNS.
### Step Zero: Prerequisites
This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/). Make sure DNS is enabled in your setup, see [DNS doc](../../build-tools/kube-dns/).
```sh
$ cd kubernetes
$ hack/dev-build-and-up.sh
```
### Step One: Create two namespaces
We'll see how cluster DNS works across multiple [namespaces](../../docs/user-guide/namespaces.md). First, we need to create two namespaces:
```sh
$ kubectl create -f examples/cluster-dns/namespace-dev.yaml
$ kubectl create -f examples/cluster-dns/namespace-prod.yaml
```
Now list all namespaces:
```sh
$ kubectl get namespaces
NAME LABELS STATUS
default <none> Active
development name=development Active
production name=production Active
```
For the kubectl client to work with each namespace, we define two contexts:
```sh
$ kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME}
$ kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME}
```
You can view your cluster name and user name in the kubectl config at `~/.kube/config`.
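If you don't want to open that file by hand, here is a rough sketch for pulling the two values out of your kubectl config (it assumes a single cluster and user entry; the jsonpath indexes are illustrative):

```sh
# Grab the first cluster and user names from the local kubectl config.
CLUSTER_NAME=$(kubectl config view -o jsonpath='{.clusters[0].name}')
USER_NAME=$(kubectl config view -o jsonpath='{.users[0].name}')
echo "${CLUSTER_NAME} ${USER_NAME}"
```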
### Step Two: Create backend replication controller in each namespace
Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/user-guide/replication-controller.md) in each namespace.
```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
```
Once that's up, you can list the replication controllers in the cluster:
```sh
$ kubectl get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
dns-backend dns-backend ddysher/dns-backend name=dns-backend 1
```
Now repeat the above commands to create a replication controller in the prod namespace:
```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
$ kubectl get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
dns-backend dns-backend ddysher/dns-backend name=dns-backend 1
```
### Step Three: Create backend service
Use the file [`examples/cluster-dns/dns-backend-service.yaml`](dns-backend-service.yaml) to create
a [service](../../docs/user-guide/services.md) for the backend server.
```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
```
Once that's up, you can list the service in the cluster:
```sh
$ kubectl get service dns-backend
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
dns-backend 10.0.2.3 <none> 8000/TCP name=dns-backend 1d
```
Again, repeat the same process for prod namespace:
```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
$ kubectl get service dns-backend
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
dns-backend 10.0.2.4 <none> 8000/TCP name=dns-backend 1d
```
### Step Four: Create client pod in one namespace
Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/user-guide/pods.md) in the dev namespace. The client pod makes a connection to the backend and exits. Specifically, it tries to connect to the address `http://dns-backend.development.cluster.local:8000`.
```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
```
Once that's up, you can list the pod in the cluster:
```sh
$ kubectl get pods dns-frontend
NAME READY STATUS RESTARTS AGE
dns-frontend 0/1 ExitCode:0 0 1m
```
Wait until the pod succeeds, then we can see the output from the client pod:
```sh
$ kubectl logs dns-frontend
2015-05-07T20:13:54.147664936Z 10.0.236.129
2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000
2015-05-07T20:13:54.147733438Z <Response [200]>
2015-05-07T20:13:54.147738295Z Hello World!
```
Please refer to the [source code](images/frontend/client.py) for details about the log output. The first line prints the IP address associated with the service in the dev namespace; the remaining lines print our request and the server's response.
If we switch to the prod namespace with the same pod config, we'll see the same result, i.e., DNS resolves across namespaces.
```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
$ kubectl logs dns-frontend
2015-05-07T20:13:54.147664936Z 10.0.236.129
2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000
2015-05-07T20:13:54.147733438Z <Response [200]>
2015-05-07T20:13:54.147738295Z Hello World!
```
#### Note about default namespace
If you prefer not to use namespaces, all your services can be addressed using the `default` namespace, e.g. `http://dns-backend.default.svc.cluster.local:8000`, or the shorthand version `http://dns-backend:8000`.
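For example, here is a rough sketch of checking the shorthand name from a throwaway pod in the `default` namespace (it assumes a `dns-backend` service exists in `default`; the `busybox` image and pod name are only illustrative):

```sh
# Query the backend service via its shorthand DNS name from the default namespace.
kubectl run -it --rm dns-check --image=busybox --restart=Never -- \
  wget -qO- http://dns-backend:8000
```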
### tl; dr;
For those of you who are impatient, here is the summary of the commands we ran in this tutorial. Remember to first set `$CLUSTER_NAME` and `$USER_NAME` to the values found in `~/.kube/config`.
```sh
# create dev and prod namespaces
kubectl create -f examples/cluster-dns/namespace-dev.yaml
kubectl create -f examples/cluster-dns/namespace-prod.yaml
# create two contexts
kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME}
kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME}
# create two backend replication controllers
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
# create backend services
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-backend-service.yaml
kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-backend-service.yaml
# create a pod in each namespace and get its output
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
kubectl logs dns-frontend
kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
kubectl logs dns-frontend
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cluster-dns/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,21 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: dns-backend
labels:
name: dns-backend
spec:
replicas: 1
selector:
name: dns-backend
template:
metadata:
labels:
name: dns-backend
spec:
containers:
- name: dns-backend
image: gcr.io/google_containers/example-dns-backend:v1
ports:
- name: backend-port
containerPort: 8000

@@ -0,0 +1,9 @@
kind: Service
apiVersion: v1
metadata:
name: dns-backend
spec:
ports:
- port: 8000
selector:
name: dns-backend

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
name: dns-frontend
labels:
name: dns-frontend
spec:
containers:
- name: dns-frontend
image: gcr.io/google_containers/example-dns-frontend:v1
command:
- python
- client.py
- http://dns-backend.development.cluster.local:8000
imagePullPolicy: Always
restartPolicy: Never

@@ -0,0 +1,20 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:2.7-slim
COPY . /dns-backend
WORKDIR /dns-backend
CMD ["python", "server.py"]

@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TAG = v1
PREFIX = gcr.io/google_containers
IMAGE = example-dns-backend
all: push
image:
docker build -t $(PREFIX)/$(IMAGE):$(TAG) .
push: image
gcloud docker -- push $(PREFIX)/$(IMAGE)
clean:

@@ -0,0 +1,37 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World!")
try:
# Create a web server and define the handler to manage the incoming request.
server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()

@@ -0,0 +1,22 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:2.7-slim
RUN pip install requests
COPY . /dns-frontend
WORKDIR /dns-frontend
CMD ["python", "client.py"]

@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TAG = v1
PREFIX = gcr.io/google_containers
IMAGE = example-dns-frontend
all: push
image:
docker build -t $(PREFIX)/$(IMAGE):$(TAG) .
push: image
gcloud docker -- push $(PREFIX)/$(IMAGE)
clean:

@@ -0,0 +1,46 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
hostname = urlparse(address).hostname
service_address = socket.gethostbyname(hostname)
print service_address
def GetServerResponse(address):
print 'Send request to:', address
response = requests.get(address)
print response
print response.content
def Main():
parser = argparse.ArgumentParser()
parser.add_argument('address')
args = parser.parse_args()
CheckServiceAddress(args.address)
GetServerResponse(args.address)
if __name__ == "__main__":
Main()

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: "development"
labels:
name: "development"

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: "production"
labels:
name: "production"

125 vendor/k8s.io/kubernetes/examples/cockroachdb/README.md generated vendored Normal file

@@ -0,0 +1,125 @@
# CockroachDB on Kubernetes as a StatefulSet
This example deploys [CockroachDB](https://cockroachlabs.com) on Kubernetes as
a StatefulSet. CockroachDB is a distributed, scalable NewSQL database. Please see
[the homepage](https://cockroachlabs.com) and the
[documentation](https://www.cockroachlabs.com/docs/) for details.
## Limitations
### StatefulSet limitations
Standard StatefulSet limitations apply: there is currently no way to use
node-local storage (outside of single-node tests), so there is likely
a performance hit associated with running CockroachDB on external storage.
Note that CockroachDB already replicates its data, so it is unnecessary to
deploy it onto persistent volumes which themselves replicate internally.
For this reason, high-performance use cases on a private Kubernetes cluster
may want to consider a DaemonSet deployment until StatefulSets support node-local
storage (see #7562).
### Recovery after persistent storage failure
A persistent storage failure (e.g. losing the hard drive) is gracefully handled
by CockroachDB as long as enough replicas survive (two out of three by
default). Due to the bootstrapping in this deployment, a storage failure of the
first node is special in that the administrator must manually prepopulate the
"new" storage medium by running an instance of CockroachDB with the `--join`
parameter. If this is not done, the first node will bootstrap a new cluster,
which will lead to a lot of trouble.
### Dynamic volume provisioning
The deployment is written for a use case in which dynamic volume provisioning is
available. When that is not the case, the persistent volume claims need
to be created manually. See [minikube.sh](minikube.sh) for the necessary
steps. If you're on GCE or AWS, where dynamic provisioning is supported, no
manual work is needed to create the persistent volumes.
## Testing locally on minikube
Follow the steps in [minikube.sh](minikube.sh) (or simply run that file).
## Testing in the cloud on GCE or AWS
Once you have a Kubernetes cluster running, just run
`kubectl create -f cockroachdb-statefulset.yaml` to create your cockroachdb cluster.
This works because GCE and AWS support dynamic volume provisioning by default,
so persistent volumes will be created for the CockroachDB pods as needed.
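For example, to create the cluster and then watch the pods come up (the `app=cockroachdb` label used below is the one applied by the example configuration):

```shell
# Create the StatefulSet and services, then watch the pods come up.
kubectl create -f cockroachdb-statefulset.yaml
kubectl get pods -l app=cockroachdb --watch
```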
## Accessing the database
Along with our StatefulSet configuration, we expose a standard Kubernetes service
that offers a load-balanced virtual IP for clients to access the database
with. In our example, we've called this service `cockroachdb-public`.
Start up a client pod and open up an interactive, (mostly) Postgres-flavor
SQL shell using:
```console
$ kubectl run -it --rm cockroach-client --image=cockroachdb/cockroach --restart=Never --command -- ./cockroach sql --host cockroachdb-public
```
You can see example SQL statements for inserting and querying data in the
included [demo script](demo.sh), but you can use almost any Postgres-style SQL
command. Some more basic examples can be found within
[CockroachDB's documentation](https://www.cockroachlabs.com/docs/learn-cockroachdb-sql.html).
## Accessing the admin UI
If you want to see information about how the cluster is doing, you can try
pulling up the CockroachDB admin UI by port-forwarding from your local machine
to one of the pods:
```shell
kubectl port-forward cockroachdb-0 8080
```
Once you've done that, you should be able to access the admin UI by visiting
http://localhost:8080/ in your web browser.
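With the port-forward in place, you can also hit the same health endpoint that the liveness and readiness probes in the StatefulSet definition use; a small sketch:

```shell
# Query the health endpoint used by the pod probes through the port-forward.
curl http://localhost:8080/_admin/v1/health
```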
## Simulating failures
When all (or enough) nodes are up, simulate a failure like this:
```shell
kubectl exec cockroachdb-0 -- /bin/bash -c "while true; do kill 1; done"
```
You can then reconnect to the database as demonstrated above and verify
that no data was lost. The example runs with three-fold replication, so
it can tolerate one failure of any given node at a time. Note also that
there is a brief period of time immediately after the creation of the
cluster during which the three-fold replication is established, and during
which killing a node may lead to unavailability.
The [demo script](demo.sh) gives an example of killing one instance of the
database and ensuring the other replicas have all data that was written.
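As a quick manual check, you can reuse the client-pod command from above together with the `-e` flag that [demo.sh](demo.sh) uses to run a single statement; this sketch assumes the example `foo.bar` table from the demo script exists:

```shell
# Read back the demo data from any surviving replica via the public service.
kubectl run -it --rm cockroach-client --image=cockroachdb/cockroach --restart=Never \
  --command -- ./cockroach sql --host cockroachdb-public -e "SELECT * FROM foo.bar;"
```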
## Scaling up or down
Simply patch the StatefulSet by running
```shell
kubectl patch statefulset cockroachdb -p '{"spec":{"replicas":4}}'
```
Note that you may need to create a new persistent volume claim first. If you
ran `minikube.sh`, there's a spare volume so you can immediately scale up by
one. If you're running on GCE or AWS, you can scale up by as many as you want
because new volumes will automatically be created for you. Convince yourself
that the new node immediately serves reads and writes.
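If you do need an extra claim, here is a sketch that follows the `datadir-cockroachdb-<ordinal>` naming pattern from [minikube.sh](minikube.sh); the ordinal `4` assumes you are growing from four pods to five, and on minikube you would also need a matching spare `PersistentVolume`:

```shell
# Manually create one more claim before patching the replica count.
cat <<EOF | kubectl create -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: datadir-cockroachdb-4
  labels:
    app: cockroachdb
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
```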
## Cleaning up when you're done
Because all of the resources in this example have been tagged with the label `app=cockroachdb`,
we can clean up everything that we created in one quick command using a selector on that label:
```shell
kubectl delete statefulsets,pods,persistentvolumes,persistentvolumeclaims,services -l app=cockroachdb
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cockroachdb/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,174 @@
apiVersion: v1
kind: Service
metadata:
# This service is meant to be used by clients of the database. It exposes a ClusterIP that will
# automatically load balance connections to the different database pods.
name: cockroachdb-public
labels:
app: cockroachdb
spec:
ports:
# The main port, served by gRPC, serves Postgres-flavor SQL, internode
# traffic and the cli.
- port: 26257
targetPort: 26257
name: grpc
# The secondary port serves the UI as well as health and debug endpoints.
- port: 8080
targetPort: 8080
name: http
selector:
app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
labels:
app: cockroachdb
annotations:
# This is needed to make the peer-finder work properly and to help avoid
# edge cases where instance 0 comes up after losing its data and needs to
# decide whether it should create a new cluster or try to join an existing
# one. If it creates a new cluster when it should have joined an existing
# one, we'd end up with two separate clusters listening at the same service
# endpoint, which would be very bad.
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
clusterIP: None
selector:
app: cockroachdb
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
template:
metadata:
labels:
app: cockroachdb
annotations:
# Init containers are run only once in the lifetime of a pod, before
# it's started up for the first time. It has to exit successfully
# before the pod's main containers are allowed to start.
# This particular init container does a DNS lookup for other pods in
# the set to help determine whether or not a cluster already exists.
# If any other pods exist, it creates a file in the cockroach-data
# directory to pass that information along to the primary container that
# has to decide what command-line flags to use when starting CockroachDB.
# This only matters when a pod's persistent volume is empty - if it has
# data from a previous execution, that data will always be used.
pod.alpha.kubernetes.io/init-containers: '[
{
"name": "bootstrap",
"image": "cockroachdb/cockroach-k8s-init:0.1",
"args": [
"-on-start=/on-start.sh",
"-service=cockroachdb"
],
"env": [
{
"name": "POD_NAMESPACE",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
}
}
}
],
"volumeMounts": [
{
"name": "datadir",
"mountPath": "/cockroach/cockroach-data"
}
]
}
]'
spec:
containers:
- name: cockroachdb
# Runs the master branch. Not recommended for production, but since
# CockroachDB is in Beta, you don't want to run it in production
# anyway. See
# https://hub.docker.com/r/cockroachdb/cockroach/tags/
# if you prefer to run a beta release.
image: cockroachdb/cockroach
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
livenessProbe:
httpGet:
path: /_admin/v1/health
port: http
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /_admin/v1/health
port: http
initialDelaySeconds: 10
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
command:
- "/bin/bash"
- "-ecx"
- |
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
# We only want to initialize a new cluster (by omitting the join flag)
# if we're sure that we're the first node (i.e. index 0) and that
# there aren't any other nodes running as part of the cluster that
# this is supposed to be a part of (which indicates that a cluster
# already exists and we should make sure not to create a new one).
# It's fine to run without --join on a restart if there aren't any
# other nodes.
if [ ! "$(hostname)" == "cockroachdb-0" ] || \
[ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
then
# We don't join cockroachdb in order to avoid a node attempting
# to join itself, which currently doesn't work
# (https://github.com/cockroachdb/cockroach/issues/9625).
CRARGS+=("--join" "cockroachdb-public")
fi
exec /cockroach/cockroach ${CRARGS[*]}
# No pre-stop hook is required, a SIGTERM plus some time is all that's
# needed for graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
volumeClaimTemplates:
- metadata:
name: datadir
annotations:
volume.alpha.kubernetes.io/storage-class: anything
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi

47 vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh generated vendored Executable file

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euo pipefail
function sql() {
# TODO(knz): Why does the more idiomatic read from stdin not produce any
# output?
kubectl exec "cockroachdb-${1}" -- /cockroach/cockroach sql \
--host "cockroachdb-${1}.cockroachdb" \
-e "$(cat /dev/stdin)"
}
function kill() {
! kubectl exec -t "cockroachdb-${1}" -- /bin/bash -c "while true; do kill 1; done" &> /dev/null
}
# Create database on second node (idempotently for convenience).
cat <<EOF | sql 1
CREATE DATABASE IF NOT EXISTS foo;
CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);
UPSERT INTO foo.bar VALUES ('Kuber', 'netes'), ('Cockroach', 'DB');
EOF
# Kill the node we just created the table on.
kill 1
# Read the data from all other nodes (we could also read from the one we just
# killed, but it's awkward to wait for it to respawn).
for i in 0 2 3 4; do
cat <<EOF | sql "${i}"
SELECT CONCAT(k, v) FROM foo.bar;
EOF
done

72 vendor/k8s.io/kubernetes/examples/cockroachdb/minikube.sh generated vendored Executable file

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the CockroachDB StatefulSet example on a minikube instance.
#
# For a fresh start, run the following first:
# minikube delete
# minikube start
#
# To upgrade minikube & kubectl on OSX, the following should suffice:
# brew reinstall kubernetes-cli --devel
# curl -Lo minikube \
# https://storage.googleapis.com/minikube/releases/v0.4.0/minikube-darwin-amd64 && \
# chmod +x minikube && sudo mv minikube /usr/local/bin/
set -exuo pipefail
# Clean up anything from a prior run:
kubectl delete statefulsets,pods,persistentvolumes,persistentvolumeclaims,services -l app=cockroachdb
# Make persistent volumes and (correctly named) claims. We must create the
# claims here manually even though that sounds counter-intuitive. For details
# see https://github.com/kubernetes/contrib/pull/1295#issuecomment-230180894.
# Note that we make an extra volume here so you can manually test scale-up.
for i in $(seq 0 3); do
cat <<EOF | kubectl create -f -
kind: PersistentVolume
apiVersion: v1
metadata:
name: pv${i}
labels:
type: local
app: cockroachdb
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/tmp/${i}"
EOF
cat <<EOF | kubectl create -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: datadir-cockroachdb-${i}
labels:
app: cockroachdb
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
EOF
done;
kubectl create -f cockroachdb-statefulset.yaml

18 vendor/k8s.io/kubernetes/examples/doc.go generated vendored Normal file

@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Examples contains sample applications for trying out the concepts in Kubernetes.
package examples // import "k8s.io/kubernetes/examples"

@@ -0,0 +1,163 @@
# Elasticsearch for Kubernetes
Kubernetes makes it trivial for anyone to build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find out how to do so.
The current Elasticsearch version is `1.7.1`.
[A more robust example that follows the Elasticsearch best practice of separating node concerns is also available](production_cluster/README.md).
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING" width="25" height="25"> Current pod descriptors use an `emptyDir` for storing data in each data node container. This is for the sake of simplicity and [should be adapted according to your storage needs](../../docs/design/persistent-storage.md).
## Docker image
The [pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes) used in this example will not be supported. Feel free to fork to fit your own needs, but keep in mind that you will need to change Kubernetes descriptors accordingly.
## Deploy
Let's kickstart our cluster with 1 instance of Elasticsearch.
```
kubectl create -f examples/elasticsearch/service-account.yaml
kubectl create -f examples/elasticsearch/es-svc.yaml
kubectl create -f examples/elasticsearch/es-rc.yaml
```
Let's see if it worked:
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
es-kfymw 1/1 Running 0 7m
kube-dns-p3v1u 3/3 Running 0 19m
```
```
$ kubectl logs es-kfymw
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
```
So we have a 1-node Elasticsearch cluster ready to handle some work.
## Scale
Scaling is as easy as:
```
kubectl scale --replicas=3 rc es
```
Did it work?
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
es-78e0s 1/1 Running 0 8m
es-kfymw 1/1 Running 0 17m
es-rjmer 1/1 Running 0 8m
kube-dns-p3v1u 3/3 Running 0 30m
```
Let's take a look at logs:
```
$ kubectl logs es-kfymw
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
[2015-08-30 10:08:02,517][INFO ][cluster.service ] [Hammerhead] added {[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true}])
[2015-08-30 10:10:10,645][INFO ][cluster.service ] [Hammerhead] added {[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true}])
```
So we have a 3-node Elasticsearch cluster ready to handle more work.
## Access the service
*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now.
```
$ kubectl get service elasticsearch
NAME LABELS SELECTOR IP(S) PORT(S)
elasticsearch component=elasticsearch component=elasticsearch 10.100.108.94 9200/TCP
9300/TCP
```
From any host on your cluster (that's running `kube-proxy`), run:
```
$ curl 10.100.108.94:9200
```
You should see something similar to the following:
```json
{
"status" : 200,
"name" : "Hammerhead",
"cluster_name" : "myesdb",
"version" : {
"number" : "1.7.1",
"build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
"build_timestamp" : "2015-07-29T09:54:16Z",
"build_snapshot" : false,
"lucene_version" : "4.10.4"
},
"tagline" : "You Know, for Search"
}
```
Or if you want to check cluster information:
```
curl 10.100.108.94:9200/_cluster/health?pretty
```
You should see something similar to the following:
```json
{
"cluster_name" : "myesdb",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0
}
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,51 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: es
labels:
component: elasticsearch
spec:
replicas: 1
template:
metadata:
labels:
component: elasticsearch
spec:
serviceAccount: elasticsearch
containers:
- name: es
securityContext:
capabilities:
add:
- IPC_LOCK
image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
env:
- name: KUBERNETES_CA_CERTIFICATE_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "CLUSTER_NAME"
value: "myesdb"
- name: "DISCOVERY_SERVICE"
value: "elasticsearch"
- name: NODE_MASTER
value: "true"
- name: NODE_DATA
value: "true"
- name: HTTP_ENABLE
value: "true"
ports:
- containerPort: 9200
name: http
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- mountPath: /data
name: storage
volumes:
- name: storage
emptyDir: {}

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
labels:
component: elasticsearch
spec:
type: LoadBalancer
selector:
component: elasticsearch
ports:
- name: http
port: 9200
protocol: TCP
- name: transport
port: 9300
protocol: TCP

@@ -0,0 +1,189 @@
# Elasticsearch for Kubernetes
Kubernetes makes it trivial for anyone to build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find out how to do so.
The current Elasticsearch version is `1.7.1`.
Before we start, you need to know that Elasticsearch best practices recommend separating nodes into three roles:
* `Master` nodes - intended for clustering management only, no data, no HTTP API
* `Client` nodes - intended for client usage, no data, with HTTP API
* `Data` nodes - intended for storing and indexing your data, no HTTP API
This is enforced throughout this document.
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING" width="25" height="25"> Current pod descriptors use an `emptyDir` for storing data in each data node container. This is for the sake of simplicity and [should be adapted according to your storage needs](../../../docs/design/persistent-storage.md).
## Docker image
This example uses [this pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes). Feel free to fork and update it to fit your own needs, but keep in mind that you will need to change Kubernetes descriptors accordingly.
## Deploy
```
kubectl create -f examples/elasticsearch/production_cluster/service-account.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-discovery-svc.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-svc.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-master-rc.yaml
```
Wait until `es-master` is provisioned, and
```
kubectl create -f examples/elasticsearch/production_cluster/es-client-rc.yaml
```
Wait until `es-client` is provisioned, and
```
kubectl create -f examples/elasticsearch/production_cluster/es-data-rc.yaml
```
Wait until `es-data` is provisioned.
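One simple way to watch each stage is to keep an eye on the pods carrying the `component=elasticsearch` label that these replication controllers apply; a sketch:

```
kubectl get pods -l component=elasticsearch --watch
```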
Now, I leave it up to you how to validate the cluster, but a first step is to wait for the containers to be in the `Running` state and check the Elasticsearch master logs:
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
es-client-2ep9o 1/1 Running 0 2m
es-data-r9tgv 1/1 Running 0 1m
es-master-vxl6c 1/1 Running 0 6m
```
```
$ kubectl logs es-master-vxl6c
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ...
[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites []
[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ...
[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]}
[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ
[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started
[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state
[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}])
[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}])
```
As you can see, the cluster is up and running. Easy, wasn't it?
## Scale
Scaling each type of node to handle your cluster is as easy as:
```
kubectl scale --replicas=3 rc es-master
kubectl scale --replicas=2 rc es-client
kubectl scale --replicas=2 rc es-data
```
Did it work?
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
es-client-2ep9o 1/1 Running 0 4m
es-client-ye5s1 1/1 Running 0 50s
es-data-8az22 1/1 Running 0 47s
es-data-r9tgv 1/1 Running 0 3m
es-master-57h7k 1/1 Running 0 52s
es-master-kuwse 1/1 Running 0 52s
es-master-vxl6c 1/1 Running 0 8m
```
Let's take another look at the Elasticsearch master logs:
```
$ kubectl logs es-master-vxl6c
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ...
[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites []
[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ...
[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]}
[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ
[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started
[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state
[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}])
[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}])
[2015-08-21 11:04:40,781][INFO ][cluster.service ] [Arc] added {[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true}])
[2015-08-21 11:04:41,076][INFO ][cluster.service ] [Arc] added {[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true}])
[2015-08-21 11:04:53,966][INFO ][cluster.service ] [Arc] added {[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false}])
[2015-08-21 11:04:56,803][INFO ][cluster.service ] [Arc] added {[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false}])
```
## Access the service
*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. If you need different behavior, you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While this is supported by the service descriptor in this example, its usage is out of the scope of this document for now.
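For reference, the `es-svc.yaml` shipped with this example (and included later in this commit) already declares the load-balancer type on the client-facing service; the relevant fragment is:

```yaml
spec:
  type: LoadBalancer
  selector:
    component: elasticsearch
    role: client
  ports:
  - name: http
    port: 9200
    protocol: TCP
```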
```
$ kubectl get service elasticsearch
NAME LABELS SELECTOR IP(S) PORT(S)
elasticsearch component=elasticsearch,role=client component=elasticsearch,role=client 10.100.134.2 9200/TCP
```
From any host on your cluster (that's running `kube-proxy`), run:
```
curl http://10.100.134.2:9200
```
You should see something similar to the following:
```json
{
"status" : 200,
"name" : "Cagliostro",
"cluster_name" : "myesdb",
"version" : {
"number" : "1.7.1",
"build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
"build_timestamp" : "2015-07-29T09:54:16Z",
"build_snapshot" : false,
"lucene_version" : "4.10.4"
},
"tagline" : "You Know, for Search"
}
```
Or if you want to check cluster information:
```
curl http://10.100.134.2:9200/_cluster/health?pretty
```
You should see something similar to the following:
```json
{
"cluster_name" : "myesdb",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 7,
"number_of_data_nodes" : 2,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0
}
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/production_cluster/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,51 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: es-client
labels:
component: elasticsearch
role: client
spec:
replicas: 1
template:
metadata:
labels:
component: elasticsearch
role: client
spec:
serviceAccount: elasticsearch
containers:
- name: es-client
securityContext:
capabilities:
add:
- IPC_LOCK
image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
env:
- name: KUBERNETES_CA_CERTIFICATE_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "CLUSTER_NAME"
value: "myesdb"
- name: NODE_MASTER
value: "false"
- name: NODE_DATA
value: "false"
- name: HTTP_ENABLE
value: "true"
ports:
- containerPort: 9200
name: http
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- mountPath: /data
name: storage
volumes:
- name: storage
emptyDir: {}

View file

@ -0,0 +1,46 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: es-data
labels:
component: elasticsearch
role: data
spec:
replicas: 1
template:
metadata:
labels:
component: elasticsearch
role: data
spec:
serviceAccount: elasticsearch
containers:
- name: es-data
securityContext:
capabilities:
add:
- IPC_LOCK
image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
env:
- name: KUBERNETES_CA_CERTIFICATE_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "CLUSTER_NAME"
value: "myesdb"
- name: NODE_MASTER
value: "false"
- name: HTTP_ENABLE
value: "false"
ports:
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- mountPath: /data
name: storage
volumes:
- name: storage
emptyDir: {}

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch-discovery
labels:
component: elasticsearch
role: master
spec:
selector:
component: elasticsearch
role: master
ports:
- name: transport
port: 9300
protocol: TCP

View file

@ -0,0 +1,48 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: es-master
labels:
component: elasticsearch
role: master
spec:
replicas: 1
template:
metadata:
labels:
component: elasticsearch
role: master
spec:
serviceAccount: elasticsearch
containers:
- name: es-master
securityContext:
capabilities:
add:
- IPC_LOCK
image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
env:
- name: KUBERNETES_CA_CERTIFICATE_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "CLUSTER_NAME"
value: "myesdb"
- name: NODE_MASTER
value: "true"
- name: NODE_DATA
value: "false"
- name: HTTP_ENABLE
value: "false"
ports:
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- mountPath: /data
name: storage
volumes:
- name: storage
emptyDir: {}

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
labels:
component: elasticsearch
role: client
spec:
type: LoadBalancer
selector:
component: elasticsearch
role: client
ports:
- name: http
port: 9200
protocol: TCP

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch

461
vendor/k8s.io/kubernetes/examples/examples_test.go generated vendored Normal file
View file

@ -0,0 +1,461 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package examples_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/apps"
appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/registry/batch/job"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/util/yaml"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
schedulerapilatest "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest"
)
func validateObject(obj runtime.Object) (errors field.ErrorList) {
switch t := obj.(type) {
case *api.ReplicationController:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateReplicationController(t)
case *api.ReplicationControllerList:
for i := range t.Items {
errors = append(errors, validateObject(&t.Items[i])...)
}
case *api.Service:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateService(t)
case *api.ServiceList:
for i := range t.Items {
errors = append(errors, validateObject(&t.Items[i])...)
}
case *api.Pod:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidatePod(t)
case *api.PodList:
for i := range t.Items {
errors = append(errors, validateObject(&t.Items[i])...)
}
case *api.PersistentVolume:
errors = validation.ValidatePersistentVolume(t)
case *api.PersistentVolumeClaim:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidatePersistentVolumeClaim(t)
case *api.PodTemplate:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidatePodTemplate(t)
case *api.Endpoints:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateEndpoints(t)
case *api.Namespace:
errors = validation.ValidateNamespace(t)
case *api.Secret:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateSecret(t)
case *api.LimitRange:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateLimitRange(t)
case *api.ResourceQuota:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidateResourceQuota(t)
case *extensions.Deployment:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = expvalidation.ValidateDeployment(t)
case *batch.Job:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
// Job needs generateSelector called before validation, and job.Validate does this.
// See: https://github.com/kubernetes/kubernetes/issues/20951#issuecomment-187787040
t.ObjectMeta.UID = types.UID("fakeuid")
errors = job.Strategy.Validate(nil, t)
case *extensions.Ingress:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = expvalidation.ValidateIngress(t)
case *extensions.DaemonSet:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = expvalidation.ValidateDaemonSet(t)
case *apps.StatefulSet:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = appsvalidation.ValidateStatefulSet(t)
default:
errors = field.ErrorList{}
errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))
}
return errors
}
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
return filepath.Walk(inDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && path != inDir {
return filepath.SkipDir
}
file := filepath.Base(path)
if ext := filepath.Ext(file); ext == ".json" || ext == ".yaml" {
glog.Infof("Testing %s", path)
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
name := strings.TrimSuffix(file, ext)
if ext == ".yaml" {
out, err := yaml.ToJSON(data)
if err != nil {
return fmt.Errorf("%s: %v", path, err)
}
data = out
}
fn(name, path, data)
}
return nil
})
}
func TestExampleObjectSchemas(t *testing.T) {
cases := map[string]map[string]runtime.Object{
"../examples/guestbook": {
"frontend-deployment": &extensions.Deployment{},
"redis-slave-deployment": &extensions.Deployment{},
"redis-master-deployment": &extensions.Deployment{},
"frontend-service": &api.Service{},
"redis-master-service": &api.Service{},
"redis-slave-service": &api.Service{},
},
"../examples/guestbook/legacy": {
"frontend-controller": &api.ReplicationController{},
"redis-slave-controller": &api.ReplicationController{},
"redis-master-controller": &api.ReplicationController{},
},
"../examples/guestbook-go": {
"guestbook-controller": &api.ReplicationController{},
"redis-slave-controller": &api.ReplicationController{},
"redis-master-controller": &api.ReplicationController{},
"guestbook-service": &api.Service{},
"redis-master-service": &api.Service{},
"redis-slave-service": &api.Service{},
},
"../examples/volumes/iscsi": {
"iscsi": &api.Pod{},
},
"../examples/volumes/glusterfs": {
"glusterfs-pod": &api.Pod{},
"glusterfs-endpoints": &api.Endpoints{},
"glusterfs-service": &api.Service{},
},
"../examples": {
"scheduler-policy-config": &schedulerapi.Policy{},
"scheduler-policy-config-with-extender": &schedulerapi.Policy{},
},
"../examples/volumes/rbd/secret": {
"ceph-secret": &api.Secret{},
},
"../examples/volumes/rbd": {
"rbd": &api.Pod{},
"rbd-with-secret": &api.Pod{},
},
"../examples/storage/cassandra": {
"cassandra-daemonset": &extensions.DaemonSet{},
"cassandra-controller": &api.ReplicationController{},
"cassandra-service": &api.Service{},
"cassandra-statefulset": &apps.StatefulSet{},
},
"../examples/cluster-dns": {
"dns-backend-rc": &api.ReplicationController{},
"dns-backend-service": &api.Service{},
"dns-frontend-pod": &api.Pod{},
"namespace-dev": &api.Namespace{},
"namespace-prod": &api.Namespace{},
},
"../examples/elasticsearch": {
"es-rc": &api.ReplicationController{},
"es-svc": &api.Service{},
"service-account": nil,
},
"../examples/explorer": {
"pod": &api.Pod{},
},
"../examples/storage/hazelcast": {
"hazelcast-controller": &api.ReplicationController{},
"hazelcast-service": &api.Service{},
},
"../examples/meteor": {
"meteor-controller": &api.ReplicationController{},
"meteor-service": &api.Service{},
"mongo-pod": &api.Pod{},
"mongo-service": &api.Service{},
},
"../examples/mysql-wordpress-pd": {
"gce-volumes": &api.PersistentVolume{},
"local-volumes": &api.PersistentVolume{},
"mysql-deployment": &api.Service{},
"wordpress-deployment": &api.Service{},
},
"../examples/volumes/nfs": {
"nfs-busybox-rc": &api.ReplicationController{},
"nfs-server-rc": &api.ReplicationController{},
"nfs-server-service": &api.Service{},
"nfs-pv": &api.PersistentVolume{},
"nfs-pvc": &api.PersistentVolumeClaim{},
"nfs-web-rc": &api.ReplicationController{},
"nfs-web-service": &api.Service{},
},
"../examples/openshift-origin": {
"openshift-origin-namespace": &api.Namespace{},
"openshift-controller": &api.ReplicationController{},
"openshift-service": &api.Service{},
"etcd-controller": &api.ReplicationController{},
"etcd-service": &api.Service{},
"etcd-discovery-controller": &api.ReplicationController{},
"etcd-discovery-service": &api.Service{},
"secret": nil,
},
"../examples/phabricator": {
"phabricator-controller": &api.ReplicationController{},
"phabricator-service": &api.Service{},
},
"../examples/storage/redis": {
"redis-controller": &api.ReplicationController{},
"redis-master": &api.Pod{},
"redis-proxy": &api.Pod{},
"redis-sentinel-controller": &api.ReplicationController{},
"redis-sentinel-service": &api.Service{},
},
"../examples/storage/rethinkdb": {
"admin-pod": &api.Pod{},
"admin-service": &api.Service{},
"driver-service": &api.Service{},
"rc": &api.ReplicationController{},
},
"../examples/spark": {
"namespace-spark-cluster": &api.Namespace{},
"spark-master-controller": &api.ReplicationController{},
"spark-master-service": &api.Service{},
"spark-ui-proxy-controller": &api.ReplicationController{},
"spark-ui-proxy-service": &api.Service{},
"spark-worker-controller": &api.ReplicationController{},
"zeppelin-controller": &api.ReplicationController{},
"zeppelin-service": &api.Service{},
},
"../examples/spark/spark-gluster": {
"spark-master-service": &api.Service{},
"spark-master-controller": &api.ReplicationController{},
"spark-worker-controller": &api.ReplicationController{},
"glusterfs-endpoints": &api.Endpoints{},
},
"../examples/storm": {
"storm-nimbus-service": &api.Service{},
"storm-nimbus": &api.Pod{},
"storm-worker-controller": &api.ReplicationController{},
"zookeeper-service": &api.Service{},
"zookeeper": &api.Pod{},
},
"../examples/volumes/cephfs/": {
"cephfs": &api.Pod{},
"cephfs-with-secret": &api.Pod{},
},
"../examples/volumes/fibre_channel": {
"fc": &api.Pod{},
},
"../examples/javaweb-tomcat-sidecar": {
"javaweb": &api.Pod{},
"javaweb-2": &api.Pod{},
},
"../examples/volumes/azure_file": {
"azure": &api.Pod{},
},
"../examples/volumes/azure_disk": {
"azure": &api.Pod{},
},
}
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: true,
})
for path, expected := range cases {
tested := 0
err := walkJSONFiles(path, func(name, path string, data []byte) {
expectedType, found := expected[name]
if !found {
t.Errorf("%s: %s does not have a test case defined", path, name)
return
}
tested++
if expectedType == nil {
t.Logf("skipping : %s/%s\n", path, name)
return
}
if strings.Contains(name, "scheduler-policy-config") {
if err := runtime.DecodeInto(schedulerapilatest.Codec, data, expectedType); err != nil {
t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
return
}
//TODO: Add validate method for &schedulerapi.Policy
} else {
codec, err := testapi.GetCodecForObject(expectedType)
if err != nil {
t.Errorf("Could not get codec for %s: %s", expectedType, err)
}
if err := runtime.DecodeInto(codec, data, expectedType); err != nil {
t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
return
}
if errors := validateObject(expectedType); len(errors) > 0 {
t.Errorf("%s did not validate correctly: %v", path, errors)
}
}
})
if err != nil {
t.Errorf("Expected no error, Got %v", err)
}
if tested != len(expected) {
t.Errorf("Directory %v: Expected %d examples, Got %d", path, len(expected), tested)
}
}
}
// This regex is tricky, but it works. For future me, here is the decode:
//
// Flags: (?ms) = multiline match, allow . to match \n
// 1) Look for a line that starts with ``` (a markdown code block)
// 2) (?: ... ) = non-capturing group
// 3) (P<name>) = capture group as "name"
// 4) Look for #1 followed by either:
// 4a) "yaml" followed by any word-characters followed by a newline (e.g. ```yamlfoo\n)
// 4b) any word-characters followed by a newline (e.g. ```json\n)
// 5) Look for either:
// 5a) #4a followed by one or more characters (non-greedy)
// 5b) #4b followed by { followed by one or more characters (non-greedy) followed by }
// 6) Look for #5 followed by a newline followed by ``` (end of the code block)
//
// This could probably be simplified, but is already too delicate. Before any
// real changes, we should have a test case that just tests this regex.
var sampleRegexp = regexp.MustCompile("(?ms)^```(?:(?P<type>yaml)\\w*\\n(?P<content>.+?)|\\w*\\n(?P<content>\\{.+?\\}))\\n^```")
var subsetRegexp = regexp.MustCompile("(?ms)\\.{3}")
func TestReadme(t *testing.T) {
paths := []struct {
file string
expectedType []runtime.Object
}{
{"../README.md", []runtime.Object{&api.Pod{}}},
{"../examples/volumes/iscsi/README.md", []runtime.Object{&api.Pod{}}},
}
for _, path := range paths {
data, err := ioutil.ReadFile(path.file)
if err != nil {
t.Errorf("Unable to read file %s: %v", path, err)
continue
}
matches := sampleRegexp.FindAllStringSubmatch(string(data), -1)
if matches == nil {
continue
}
ix := 0
for _, match := range matches {
var content, subtype string
for i, name := range sampleRegexp.SubexpNames() {
if name == "type" {
subtype = match[i]
}
if name == "content" && match[i] != "" {
content = match[i]
}
}
if subtype == "yaml" && subsetRegexp.FindString(content) != "" {
t.Logf("skipping (%s): \n%s", subtype, content)
continue
}
var expectedType runtime.Object
if len(path.expectedType) == 1 {
expectedType = path.expectedType[0]
} else {
expectedType = path.expectedType[ix]
ix++
}
json, err := yaml.ToJSON([]byte(content))
if err != nil {
t.Errorf("%s could not be converted to JSON: %v\n%s", path, err, string(content))
}
if err := runtime.DecodeInto(testapi.Default.Codec(), json, expectedType); err != nil {
t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(content))
continue
}
if errors := validateObject(expectedType); len(errors) > 0 {
t.Errorf("%s did not validate correctly: %v", path, errors)
}
_, err = runtime.Encode(testapi.Default.Codec(), expectedType)
if err != nil {
t.Errorf("Could not encode object: %v", err)
continue
}
}
}
}

View file

@ -0,0 +1,414 @@
## Persistent Volume Provisioning
This example shows how to use experimental persistent volume provisioning.
### Prerequisites
This example assumes that you have an understanding of Kubernetes administration and can modify the
scripts that launch kube-controller-manager.
### Admin Configuration
The admin must define `StorageClass` objects that describe named "classes" of storage offered in a cluster. Different classes might map to arbitrary levels or policies determined by the admin. When configuring a `StorageClass` object for persistent volume provisioning, the admin will need to describe the type of provisioner to use and the parameters that will be used by the provisioner when it provisions a `PersistentVolume` belonging to the class.
The name of a StorageClass object is significant: it is how users request a particular class, by specifying the name in their `PersistentVolumeClaim`. The `provisioner` field must be specified as it determines what volume plugin is used for provisioning PVs. Two cloud providers will be supported in the beta version of this feature: EBS and GCE. The `parameters` field contains the parameters that describe volumes belonging to the storage class. Different parameters may be accepted depending on the `provisioner`. For example, the value `io1` for the parameter `type` and the parameter `iopsPerGB` are specific to EBS. When a parameter is omitted, some default is used.
#### AWS
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/aws-ebs
parameters:
type: io1
zone: us-east-1d
iopsPerGB: "10"
```
* `type`: `io1`, `gp2`, `sc1`, `st1`. See AWS docs for details. Default: `gp2`.
* `zone`: AWS zone. If not specified, a random zone from those where Kubernetes cluster has a node is chosen.
* `iopsPerGB`: only for `io1` volumes. I/O operations per second per GiB. The AWS volume plugin multiplies this by the size of the requested volume to compute the IOPS of the volume, capped at 20,000 IOPS (the maximum supported by AWS, see AWS docs). For example, with `iopsPerGB: "10"` a 100Gi claim is provisioned as a 1,000 IOPS volume.
* `encrypted`: denotes whether the EBS volume should be encrypted or not. Valid values are `true` or `false`.
* `kmsKeyId`: optional. The full Amazon Resource Name of the key to use when encrypting the volume. If none is supplied but `encrypted` is true, a key is generated by AWS. See AWS docs for valid ARN value.
#### GCE
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
zone: us-central1-a
```
* `type`: `pd-standard` or `pd-ssd`. Default: `pd-ssd`
* `zone`: GCE zone. If not specified, a random zone in the same region as controller-manager will be chosen.
#### vSphere
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/vsphere-volume
parameters:
diskformat: eagerzeroedthick
```
* `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`.
#### GLUSTERFS
```yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://127.0.0.1:8081"
clusterid: "630372ccdc720a92c681fb928f27b53f"
restuser: "admin"
secretNamespace: "default"
secretName: "heketi-secret"
```
* `resturl` : Gluster REST service/Heketi service URL which provisions gluster volumes on demand. The general format should be `IPaddress:Port` and this is a mandatory parameter for the GlusterFS dynamic provisioner. If the Heketi service is exposed as a routable service in the OpenShift/Kubernetes setup, this can have a format similar to
`http://heketi-storage-project.cloudapps.mystorage.com`, where the FQDN is a resolvable Heketi service URL.
* `restauthenabled` : Gluster REST service authentication boolean that enables authentication to the REST server. If this value is 'true', `restuser` and `restuserkey` or `secretNamespace` + `secretName` have to be filled. This option is deprecated; authentication is enabled when any of `restuser`, `restuserkey`, `secretName` or `secretNamespace` is specified.
* `restuser` : Gluster REST service/Heketi user who has access to create volumes in the Gluster Trusted Pool.
* `restuserkey` : Gluster REST service/Heketi user's password which will be used for authentication to the REST server. This parameter is deprecated in favor of `secretNamespace` + `secretName`.
* `secretNamespace` + `secretName` : Identification of the Secret instance that contains the user password to use when talking to the Gluster REST service. These parameters are optional; an empty password will be used when both `secretNamespace` and `secretName` are omitted. The provided secret must have type "kubernetes.io/glusterfs".
When both `restuserkey` and `secretNamespace` + `secretName` are specified, the secret will be used.
* `clusterid`: `630372ccdc720a92c681fb928f27b53f` is the ID of the cluster which will be used by Heketi when provisioning the volume. It can also be a list of cluster IDs, for example:
"8452344e2becec931ece4e33c4674e4e,42982310de6c63381718ccfa6d8cf397". This is an optional parameter.
An example of a secret can be found in [glusterfs-provisioning-secret.yaml](glusterfs-provisioning-secret.yaml).
Reference: [How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology)
When persistent volumes are dynamically provisioned, the Gluster plugin automatically creates an endpoint and a headless service named `gluster-dynamic-<claimname>`. This dynamic endpoint and service will be deleted automatically when the persistent volume claim is deleted.
#### OpenStack Cinder
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: gold
provisioner: kubernetes.io/cinder
parameters:
type: fast
availability: nova
```
* `type`: [VolumeType](http://docs.openstack.org/admin-guide/dashboard-manage-volumes.html) created in Cinder. Default is empty.
* `availability`: Availability Zone. Default is empty.
#### Ceph RBD
```yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: fast
provisioner: kubernetes.io/rbd
parameters:
monitors: 10.16.153.105:6789
adminId: kube
adminSecretName: ceph-secret
adminSecretNamespace: kube-system
pool: kube
userId: kube
userSecretName: ceph-secret-user
```
* `monitors`: Ceph monitors, comma delimited. It is required.
* `adminId`: Ceph client ID that is capable of creating images in the pool. Default is "admin".
* `adminSecretName`: Secret Name for `adminId`. It is required. The provided secret must have type "kubernetes.io/rbd".
* `adminSecretNamespace`: The namespace for `adminSecretName`. Default is "default".
* `pool`: Ceph RBD pool. Default is "rbd".
* `userId`: Ceph client ID that is used to map the RBD image. Default is the same as `adminId`.
* `userSecretName`: The name of Ceph Secret for `userId` to map RBD image. It must exist in the same namespace as PVCs. It is required.
#### Quobyte
<!-- BEGIN MUNGE: EXAMPLE quobyte/quobyte-storage-class.yaml -->
```yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/quobyte
parameters:
quobyteAPIServer: "http://138.68.74.142:7860"
registry: "138.68.74.142:7861"
adminSecretName: "quobyte-admin-secret"
adminSecretNamespace: "kube-system"
user: "root"
group: "root"
quobyteConfig: "BASE"
quobyteTenant: "DEFAULT"
```
[Download example](quobyte/quobyte-storage-class.yaml?raw=true)
<!-- END MUNGE: EXAMPLE quobyte/quobyte-storage-class.yaml -->
* **quobyteAPIServer** API Server of Quobyte in the format http(s)://api-server:7860
* **registry** Quobyte registry to use to mount the volume. You can specify the registry as a <host>:<port> pair or, if you want to specify multiple registries, you just have to put a comma between them, e.g. <host1>:<port>,<host2>:<port>,<host3>:<port>. The host can be an IP address or, if you have a working DNS, you can also provide the DNS names.
* **adminSecretName** secret that holds information about the Quobyte user and the password to authenticate against the API server. The provided secret must have type "kubernetes.io/quobyte".
* **adminSecretNamespace** The namespace for **adminSecretName**. Default is `default`.
* **user** maps all access to this user. Default is `root`.
* **group** maps all access to this group. Default is `nfsnobody`.
* **quobyteConfig** use the specified configuration to create the volume. You can create a new configuration or modify an existing one with the Web console or the quobyte CLI. Default is `BASE`.
* **quobyteTenant** use the specified tenant ID to create/delete the volume. This Quobyte tenant has to be already present in Quobyte. Default is `DEFAULT`.
First create Quobyte admin's Secret in the system namespace. Here the Secret is created in `kube-system`:
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/quobyte/quobyte-admin-secret.yaml --namespace=kube-system
```
Then create the Quobyte storage class:
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml
```
Now create a PVC
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/claim1.json
```
Check the created PVC:
```
$ kubectl describe pvc
Name: claim1
Namespace: default
Status: Bound
Volume: pvc-bdb82652-694a-11e6-b811-080027242396
Labels: <none>
Capacity: 3Gi
Access Modes: RWO
No events.
$ kubectl describe pv
Name: pvc-bdb82652-694a-11e6-b811-080027242396
Labels: <none>
Status: Bound
Claim: default/claim1
Reclaim Policy: Delete
Access Modes: RWO
Capacity: 3Gi
Message:
Source:
Type: Quobyte (a Quobyte mount on the host that shares a pod's lifetime)
Registry: 138.68.79.14:7861
Volume: kubernetes-dynamic-pvc-bdb97c58-694a-11e6-91b6-080027242396
ReadOnly: false
No events.
```
Create a Pod to use the PVC:
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/quobyte/example-pod.yaml
```
#### Azure Disk
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Standard_LRS
location: eastus
storageAccount: azure_storage_account_name
```
* `skuName`: Azure storage account Sku tier. Default is empty.
* `location`: Azure storage account location. Default is empty.
* `storageAccount`: Azure storage account name. If storage account is not provided, all storage accounts associated with the resource group are searched to find one that matches `skuName` and `location`. If storage account is provided, `skuName` and `location` are ignored.
### User provisioning requests
Users request dynamically provisioned storage by including a storage class in their `PersistentVolumeClaim`.
The annotation `volume.beta.kubernetes.io/storage-class` is used to access this experimental feature. It is required that this value matches the name of a `StorageClass` configured by the administrator.
In the future, the storage class may remain in an annotation or become a field on the claim itself.
```
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "claim1",
"annotations": {
"volume.beta.kubernetes.io/storage-class": "slow"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "3Gi"
}
}
}
}
```
### Sample output
#### GCE
This example uses GCE but any provisioner would follow the same flow.
First we note there are no Persistent Volumes in the cluster. After creating a storage class and a claim including that storage class, we see a new PV is created
and automatically bound to the claim requesting storage.
```
$ kubectl get pv
$ kubectl create -f examples/experimental/persistent-volume-provisioning/gce-pd.yaml
storageclass "slow" created
$ kubectl create -f examples/experimental/persistent-volume-provisioning/claim1.json
persistentvolumeclaim "claim1" created
$ kubectl get pv
NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE
pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO Bound default/claim1 4s
$ kubectl get pvc
NAME LABELS STATUS VOLUME CAPACITY ACCESSMODES AGE
claim1 <none> Bound pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO 7s
# delete the claim to release the volume
$ kubectl delete pvc claim1
persistentvolumeclaim "claim1" deleted
# the volume is deleted in response to the release of its claim
$ kubectl get pv
```
#### Ceph RBD
This section will guide you on how to configure and use the Ceph RBD provisioner.
##### Pre-requisites
For this to work you must have a functional Ceph cluster, and the `rbd` command line utility must be installed on any host/container that `kube-controller-manager` or `kubelet` is running on.
##### Configuration
First we must identify the Ceph client admin key. This is usually found in `/etc/ceph/ceph.client.admin.keyring` on your Ceph cluster nodes. The file will look something like this:
```
[client.admin]
key = AQBfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx==
auid = 0
caps mds = "allow"
caps mon = "allow *"
caps osd = "allow *"
```
From the key value, we will create a secret. We must create the Ceph admin Secret in the namespace defined in our `StorageClass`. In this example we set the namespace to `kube-system`.
```
$ kubectl create secret generic ceph-secret-admin --from-literal=key='AQBfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx==' --namespace=kube-system
```
Now modify `examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml` to reflect your environment, particularly the `monitors` field. We are now ready to create our RBD Storage Class:
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml
```
The kube-controller-manager is now able to provision storage; however, we still need to be able to map it. Mapping should be done with a non-privileged key; if you have existing users, you can get all keys by running `ceph auth list` on your Ceph cluster with the admin key. For this example we will create a new user and pool.
```
$ ceph osd pool create kube 512
$ ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
[client.kube]
key = AQBQyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy==
```
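Before a claim in a user namespace can bind, the non-privileged key also has to be stored as a Secret of type `kubernetes.io/rbd` in that namespace, under the `userSecretName` referenced by the storage class (`ceph-secret-user` in this example; the `rbd/ceph-secret-user.yaml` included later in this commit is the same Secret without a namespace set). A minimal sketch, with a placeholder key, for the `myns` namespace used below:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-user
  namespace: myns
type: kubernetes.io/rbd
data:
  # placeholder value; replace with the output of: echo -n '<client.kube key>' | base64
  key: PGJhc2U2NC1lbmNvZGVkLWtleT4=
```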
##### Usage
Once configured, create a PVC in a user's namespace (e.g. myns):
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/claim1.json --namespace=myns
```
Eventually the PVC creation will result in a matching PV and RBD volume:
```
$ kubectl describe pvc --namespace=myns
Name: claim1
Namespace: myns
Status: Bound
Volume: pvc-1cfa23b3-664b-11e6-9eb9-90b11c09520d
Labels: <none>
Capacity: 3Gi
Access Modes: RWO
No events.
$ kubectl describe pv
Name: pvc-1cfa23b3-664b-11e6-9eb9-90b11c09520d
Labels: <none>
Status: Bound
Claim: myns/claim1
Reclaim Policy: Delete
Access Modes: RWO
Capacity: 3Gi
Message:
Source:
Type: RBD (a Rados Block Device mount on the host that shares a pod's lifetime)
CephMonitors: [127.0.0.1:6789]
RBDImage: kubernetes-dynamic-pvc-1cfb1862-664b-11e6-9a5d-90b11c09520d
FSType:
RBDPool: kube
RadosUser: kube
Keyring: /etc/ceph/keyring
SecretRef: &{ceph-secret-user}
ReadOnly: false
No events.
```
With our storage provisioned, we can now create a Pod to use the PVC:
```
$ kubectl create -f examples/experimental/persistent-volume-provisioning/rbd/pod.yaml --namespace=myns
```
Now our pod has an RBD mount!
```
$ export PODNAME=`kubectl get pod --selector='role=server' --namespace=myns --output=template --template="{{with index .items 0}}{{.metadata.name}}{{end}}"`
$ kubectl exec -it $PODNAME --namespace=myns -- df -h | grep rbd
/dev/rbd1 2.9G 4.5M 2.8G 1% /var/lib/www/html
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/experimental/persistent-volume-provisioning/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,9 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/aws-ebs
parameters:
type: io1
zone: us-east-1d
iopsPerGB: "10"

View file

@ -0,0 +1,20 @@
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "claim1",
"annotations": {
"volume.beta.kubernetes.io/storage-class": "slow"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "3Gi"
}
}
}
}

View file

@ -0,0 +1,8 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
zone: us-central1-a

View file

@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://127.0.0.1:8081"
clusterid: "630372ccdc720a92c681fb928f27b53f"
restuser: "admin"
secretNamespace: "default"
secretName: "heketi-secret"

View file

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: heketi-secret
namespace: default
data:
# base64 encoded password. E.g.: echo -n "mypassword" | base64
key: bXlwYXNzd29yZA==
type: kubernetes.io/glusterfs

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: server
spec:
replicas: 1
selector:
role: server
template:
metadata:
labels:
role: server
spec:
containers:
- name: server
image: nginx
volumeMounts:
- mountPath: /var/lib/www/html
name: quobytepvc
volumes:
- name: quobytepvc
persistentVolumeClaim:
claimName: claim1

View file

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: quobyte-admin-secret
data:
password: cXVvYnl0ZQ==
user: YWRtaW4=
type: kubernetes.io/quobyte

View file

@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/quobyte
parameters:
quobyteAPIServer: "http://138.68.74.142:7860"
registry: "138.68.74.142:7861"
adminSecretName: "quobyte-admin-secret"
adminSecretNamespace: "kube-system"
user: "root"
group: "root"
quobyteConfig: "BASE"
quobyteTenant: "DEFAULT"

View file

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
key: QVFEQ1pMdFhPUnQrSmhBQUFYaERWNHJsZ3BsMmNjcDR6RFZST0E9PQ==
type: kubernetes.io/rbd

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-user
type: "kubernetes.io/rbd"
data:
key: QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: server
spec:
replicas: 1
selector:
role: server
template:
metadata:
labels:
role: server
spec:
containers:
- name: server
image: nginx
volumeMounts:
- mountPath: /var/lib/www/html
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: claim1

View file

@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/rbd
parameters:
monitors: 127.0.0.1:6789
adminId: admin
adminSecretName: ceph-secret-admin
adminSecretNamespace: "kube-system"
pool: kube
userId: kube
userSecretName: ceph-secret-user

20
vendor/k8s.io/kubernetes/examples/explorer/Dockerfile generated vendored Normal file
View file

@ -0,0 +1,20 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM scratch
MAINTAINER Daniel Smith <dbsmith@google.com>
ADD explorer explorer
ADD README.md README.md
EXPOSE 8080
ENTRYPOINT ["/explorer"]

30
vendor/k8s.io/kubernetes/examples/explorer/Makefile generated vendored Normal file
View file

@ -0,0 +1,30 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
all: push
# Keep this one version ahead, so no one accidentally blows away the latest published version.
TAG = 1.1
explorer: explorer.go
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./explorer.go
container: explorer
docker build -t gcr.io/google_containers/explorer:$(TAG) .
push: container
gcloud docker -- push gcr.io/google_containers/explorer:$(TAG)
clean:
rm -f explorer

133
vendor/k8s.io/kubernetes/examples/explorer/README.md generated vendored Normal file
View file

@ -0,0 +1,133 @@
### explorer
Explorer is a little container for examining the runtime environment Kubernetes produces for your pods.
The intended use is to substitute gcr.io/google_containers/explorer for your intended container, and then visit it via the proxy.
Currently, you can look at:
* The environment variables to make sure Kubernetes is doing what you expect.
* The filesystem to make sure the mounted volumes and files are also what you expect.
* Perform DNS lookups, to see how DNS works.
`pod.yaml` is supplied as an example. You can control the port it serves on with the -port flag.
Example from command line (the DNS lookup looks better from a web browser):
```console
$ kubectl create -f examples/explorer/pod.yaml
$ kubectl proxy &
Starting to serve on localhost:8001
$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/vars/
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=explorer
KIBANA_LOGGING_PORT_5601_TCP_PORT=5601
KUBERNETES_SERVICE_HOST=10.0.0.2
MONITORING_GRAFANA_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_UI_PORT_80_TCP_PROTO=tcp
KIBANA_LOGGING_SERVICE_PORT=5601
MONITORING_HEAPSTER_PORT_80_TCP_PORT=80
MONITORING_INFLUXDB_UI_PORT_80_TCP_PORT=80
KIBANA_LOGGING_SERVICE_HOST=10.0.204.206
KIBANA_LOGGING_PORT_5601_TCP=tcp://10.0.204.206:5601
KUBERNETES_PORT=tcp://10.0.0.2:443
MONITORING_INFLUXDB_PORT=tcp://10.0.2.30:80
MONITORING_INFLUXDB_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_UI_PORT=tcp://10.0.36.78:80
KUBE_DNS_PORT_53_UDP=udp://10.0.0.10:53
MONITORING_INFLUXDB_SERVICE_HOST=10.0.2.30
ELASTICSEARCH_LOGGING_PORT=tcp://10.0.48.200:9200
ELASTICSEARCH_LOGGING_PORT_9200_TCP_PORT=9200
KUBERNETES_PORT_443_TCP=tcp://10.0.0.2:443
ELASTICSEARCH_LOGGING_PORT_9200_TCP_PROTO=tcp
KIBANA_LOGGING_PORT_5601_TCP_ADDR=10.0.204.206
KUBE_DNS_PORT_53_UDP_ADDR=10.0.0.10
MONITORING_HEAPSTER_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_PORT_80_TCP_ADDR=10.0.2.30
KIBANA_LOGGING_PORT=tcp://10.0.204.206:5601
MONITORING_GRAFANA_SERVICE_PORT=80
MONITORING_HEAPSTER_SERVICE_PORT=80
MONITORING_HEAPSTER_PORT_80_TCP=tcp://10.0.150.238:80
ELASTICSEARCH_LOGGING_PORT_9200_TCP=tcp://10.0.48.200:9200
ELASTICSEARCH_LOGGING_PORT_9200_TCP_ADDR=10.0.48.200
MONITORING_GRAFANA_PORT_80_TCP_PORT=80
MONITORING_HEAPSTER_PORT=tcp://10.0.150.238:80
MONITORING_INFLUXDB_PORT_80_TCP=tcp://10.0.2.30:80
KUBE_DNS_SERVICE_PORT=53
KUBE_DNS_PORT_53_UDP_PORT=53
MONITORING_GRAFANA_PORT_80_TCP_ADDR=10.0.100.174
MONITORING_INFLUXDB_UI_SERVICE_HOST=10.0.36.78
KIBANA_LOGGING_PORT_5601_TCP_PROTO=tcp
MONITORING_GRAFANA_PORT=tcp://10.0.100.174:80
MONITORING_INFLUXDB_UI_PORT_80_TCP_ADDR=10.0.36.78
KUBE_DNS_SERVICE_HOST=10.0.0.10
KUBERNETES_PORT_443_TCP_PORT=443
MONITORING_HEAPSTER_PORT_80_TCP_ADDR=10.0.150.238
MONITORING_INFLUXDB_UI_SERVICE_PORT=80
KUBE_DNS_PORT=udp://10.0.0.10:53
ELASTICSEARCH_LOGGING_SERVICE_HOST=10.0.48.200
KUBERNETES_SERVICE_PORT=443
MONITORING_HEAPSTER_SERVICE_HOST=10.0.150.238
MONITORING_INFLUXDB_SERVICE_PORT=80
MONITORING_INFLUXDB_PORT_80_TCP_PORT=80
KUBE_DNS_PORT_53_UDP_PROTO=udp
MONITORING_GRAFANA_PORT_80_TCP=tcp://10.0.100.174:80
ELASTICSEARCH_LOGGING_SERVICE_PORT=9200
MONITORING_GRAFANA_SERVICE_HOST=10.0.100.174
MONITORING_INFLUXDB_UI_PORT_80_TCP=tcp://10.0.36.78:80
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.0.0.2
HOME=/
$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/fs/
mount/
var/
.dockerenv
etc/
dev/
proc/
.dockerinit
sys/
README.md
explorer
$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/dns?q=elasticsearch-logging
<html><head></head><body>
<form action="/api/v1/proxy/namespaces/default/pods/explorer:8080/dns">
<input name="q" type="text" value="elasticsearch-logging"/>
<button type="submit">Lookup</button>
</form>
<br/><br/><pre>LookupNS(elasticsearch-logging):
Result: ([]*net.NS)<nil>
Error: &lt;*&gt;lookup elasticsearch-logging: no such host
LookupTXT(elasticsearch-logging):
Result: ([]string)<nil>
Error: &lt;*&gt;lookup elasticsearch-logging: no such host
LookupSRV(&#34;&#34;, &#34;&#34;, elasticsearch-logging):
cname: elasticsearch-logging.default.svc.cluster.local.
Result: ([]*net.SRV)[&lt;*&gt;{Target:(string)elasticsearch-logging.default.svc.cluster.local. Port:(uint16)9200 Priority:(uint16)10 Weight:(uint16)100}]
Error: <nil>
LookupHost(elasticsearch-logging):
Result: ([]string)[10.0.60.245]
Error: <nil>
LookupIP(elasticsearch-logging):
Result: ([]net.IP)[10.0.60.245]
Error: <nil>
LookupMX(elasticsearch-logging):
Result: ([]*net.MX)<nil>
Error: &lt;*&gt;lookup elasticsearch-logging: no such host
</nil></nil></nil></nil></nil></nil></pre>
</body></html>
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/explorer/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

122
vendor/k8s.io/kubernetes/examples/explorer/explorer.go generated vendored Normal file
View file

@ -0,0 +1,122 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A tiny web server for viewing the environment kubernetes creates for your
// containers. It exposes the filesystem and environment variables via http
// server.
package main
import (
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"github.com/davecgh/go-spew/spew"
)
var (
port = flag.Int("port", 8080, "Port number to serve at.")
)
func main() {
flag.Parse()
hostname, err := os.Hostname()
if err != nil {
log.Fatalf("Error getting hostname: %v", err)
}
links := []struct {
link, desc string
}{
{"/fs/", "Complete file system as seen by this container."},
{"/vars/", "Environment variables as seen by this container."},
{"/hostname/", "Hostname as seen by this container."},
{"/dns?q=google.com", "Explore DNS records seen by this container."},
{"/quit", "Cause this container to exit."},
}
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "<b> Kubernetes environment explorer </b><br/><br/>")
for _, v := range links {
fmt.Fprintf(w, `<a href="%v">%v: %v</a><br/>`, v.link, v.link, v.desc)
}
})
http.Handle("/fs/", http.StripPrefix("/fs/", http.FileServer(http.Dir("/"))))
http.HandleFunc("/vars/", func(w http.ResponseWriter, r *http.Request) {
for _, v := range os.Environ() {
fmt.Fprintf(w, "%v\n", v)
}
})
http.HandleFunc("/hostname/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, hostname)
})
http.HandleFunc("/quit", func(w http.ResponseWriter, r *http.Request) {
os.Exit(0)
})
http.HandleFunc("/dns", dns)
go log.Fatal(http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", *port), nil))
select {}
}
func dns(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query().Get("q")
// Note that the below is NOT safe from input attacks, but that's OK
// because this is just for debugging.
fmt.Fprintf(w, `<html><body>
<form action="/dns">
<input name="q" type="text" value="%v"></input>
<button type="submit">Lookup</button>
</form>
<br/><br/><pre>`, q)
{
res, err := net.LookupNS(q)
spew.Fprintf(w, "LookupNS(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
}
{
res, err := net.LookupTXT(q)
spew.Fprintf(w, "LookupTXT(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
}
{
cname, res, err := net.LookupSRV("", "", q)
spew.Fprintf(w, `LookupSRV("", "", %v):
cname: %v
Result: %#v
Error: %v
`, q, cname, res, err)
}
{
res, err := net.LookupHost(q)
spew.Fprintf(w, "LookupHost(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
}
{
res, err := net.LookupIP(q)
spew.Fprintf(w, "LookupIP(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
}
{
res, err := net.LookupMX(q)
spew.Fprintf(w, "LookupMX(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
}
fmt.Fprintf(w, `</pre>
</body>
</html>`)
}

18
vendor/k8s.io/kubernetes/examples/explorer/pod.yaml generated vendored Normal file
View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: explorer
spec:
containers:
- name: explorer
image: gcr.io/google_containers/explorer:1.0
args: ["-port=8080"]
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- mountPath: "/mount/test-volume"
name: test-volume
volumes:
- name: test-volume
emptyDir: {}

View file

@ -0,0 +1,271 @@
## Guestbook Example
This example shows how to build a simple multi-tier web application using Kubernetes and Docker. The application consists of a web front-end, a Redis master for storage, and a replicated set of Redis slaves, for all of which we will create Kubernetes replication controllers, pods, and services.
If you are running a cluster in Google Container Engine (GKE), instead see the [Guestbook Example for Google Container Engine](https://cloud.google.com/container-engine/docs/tutorials/guestbook).
##### Table of Contents
* [Step Zero: Prerequisites](#step-zero)
* [Step One: Create the Redis master pod](#step-one)
* [Step Two: Create the Redis master service](#step-two)
* [Step Three: Create the Redis slave pods](#step-three)
* [Step Four: Create the Redis slave service](#step-four)
* [Step Five: Create the guestbook pods](#step-five)
* [Step Six: Create the guestbook service](#step-six)
* [Step Seven: View the guestbook](#step-seven)
* [Step Eight: Cleanup](#step-eight)
### Step Zero: Prerequisites <a id="step-zero"></a>
This example assumes that you have a working cluster. See the [Getting Started Guides](../../docs/getting-started-guides/) for details about creating a cluster.
**Tip:** View all the `kubectl` commands, including their options and descriptions in the [kubectl CLI reference](../../docs/user-guide/kubectl/kubectl.md).
### Step One: Create the Redis master pod <a id="step-one"></a>
Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/user-guide/replication-controller.md) and Redis master [pod](../../docs/user-guide/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for a single replica, so that the pod benefits from the self-healing mechanism in Kubernetes (it keeps the pods alive).
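For orientation, a replication controller manifest pairs a label selector with a pod template. The repository ships this as JSON in `redis-master-controller.json`; the YAML sketch below shows the same shape and is illustrative only, with the image and labels taken from the command output further down:

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
spec:
  replicas: 1
  selector:
    app: redis
    role: master
  template:
    metadata:
      labels:
        app: redis
        role: master
    spec:
      containers:
      - name: redis-master
        image: gurpartap/redis
        ports:
        - containerPort: 6379
```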
1. Use the [redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/redis-master-controller.json
replicationcontrollers/redis-master
```
2. To verify that the redis-master controller is up, list the replication controllers you created in the cluster with the `kubectl get rc` command (if you don't specify a `--namespace`, the `default` namespace will be used; the same applies below):
```console
$ kubectl get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
redis-master redis-master gurpartap/redis app=redis,role=master 1
...
```
Result: The replication controller then creates the single Redis master pod.
3. To verify that the redis-master pod is running, list the pods you created in the cluster with the `kubectl get pods` command:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
redis-master-xx4uv 1/1 Running 0 1m
...
```
Result: You'll see a single Redis master pod and, once the pod has been placed (which may take up to thirty seconds), the machine where it is running.
4. To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud compute ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`:
```console
me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-bz1p
me@kubernetes-node-3:~$ sudo docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS
d5c458dabe50 redis "/entrypoint.sh redis" 5 minutes ago Up 5 minutes
```
Note: The initial `docker pull` can take a few minutes, depending on network conditions.
### Step Two: Create the Redis master service <a id="step-two"></a>
A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more pods. The services in a Kubernetes cluster are discoverable inside other pods via environment variables or DNS.
Services find the pods to load balance based on pod labels. The pod that you created in Step One has the labels `app=redis` and `role=master`. The selector field of the service determines which pods will receive the traffic sent to the service.
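The part of the service manifest that matters for routing is the selector and the port. A hedged YAML sketch of what `redis-master-service.json` declares (field values mirror the listings below; the exact file contents may differ slightly):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: master
```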
1. Use the [redis-master-service.json](redis-master-service.json) file to create the service in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/redis-master-service.json
services/redis-master
```
2. To verify that the redis-master service is up, list the services you created in the cluster with the `kubectl get services` command:
```console
$ kubectl get services
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
...
```
Result: All new pods will see the `redis-master` service running on the host (`$REDIS_MASTER_SERVICE_HOST` environment variable) at port 6379, or reachable via DNS at `redis-master:6379`. After the service is created, the service proxy on each node is configured to set up a proxy on the specified port (in our example, that's port 6379).
### Step Three: Create the Redis slave pods <a id="step-three"></a>
The Redis master we created earlier is a single pod (REPLICAS = 1), while the Redis read slaves we are creating here are 'replicated' pods. In Kubernetes, a replication controller is responsible for managing the multiple instances of a replicated pod.
1. Use the file [redis-slave-controller.json](redis-slave-controller.json) to create the replication controller by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/redis-slave-controller.json
replicationcontrollers/redis-slave
```
2. To verify that the redis-slave controller is running, run the `kubectl get rc` command:
```console
$ kubectl get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
redis-master redis-master redis app=redis,role=master 1
redis-slave redis-slave kubernetes/redis-slave:v2 app=redis,role=slave 2
...
```
Result: The replication controller creates and configures the Redis slave pods through the redis-master service (name:port pair, in our example that's `redis-master:6379`).
Example:
The Redis slaves get started by the replication controller with the following command:
```console
redis-server --slaveof redis-master 6379
```
3. To verify that the Redis master and slaves pods are running, run the `kubectl get pods` command:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
redis-master-xx4uv 1/1 Running 0 18m
redis-slave-b6wj4 1/1 Running 0 1m
redis-slave-iai40 1/1 Running 0 1m
...
```
Result: You see the single Redis master and two Redis slave pods.
### Step Four: Create the Redis slave service <a id="step-four"></a>
Just like the master, we want to have a service to proxy connections to the read slaves. In this case, in addition to discovery, the Redis slave service provides transparent load balancing to clients.
1. Use the [redis-slave-service.json](redis-slave-service.json) file to create the Redis slave service by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/redis-slave-service.json
services/redis-slave
```
2. To verify that the redis-slave service is up, list the services you created in the cluster with the `kubectl get services` command:
```console
$ kubectl get services
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
...
```
Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves.
Tip: It is helpful to set labels on your services themselves--as we've done here--to make it easy to locate them later.
### Step Five: Create the guestbook pods <a id="step-five"></a>
This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni) based) server that is configured to talk to either the slave or master services depending on whether the request is a read or a write. The pods we are creating expose a simple JSON interface and serve a jQuery-Ajax-based UI. Like the Redis read slaves, these pods are also managed by a replication controller.
1. Use the [guestbook-controller.json](guestbook-controller.json) file to create the guestbook replication controller by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/guestbook-controller.json
replicationcontrollers/guestbook
```
Tip: If you want to modify the guestbook code, open the `_src` directory of this example and read the README.md and the Makefile. If you have pushed your custom image, be sure to update the `image` accordingly in guestbook-controller.json.
2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:
```console
$ kubectl get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
guestbook guestbook gcr.io/google_containers/guestbook:v3 app=guestbook 3
redis-master redis-master redis app=redis,role=master 1
redis-slave redis-slave kubernetes/redis-slave:v2 app=redis,role=slave 2
...
```
3. To verify that the guestbook pods are running (it might take up to thirty seconds to create the pods), list the pods you created in cluster with the `kubectl get pods` command:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
guestbook-3crgn 1/1 Running 0 2m
guestbook-gv7i6 1/1 Running 0 2m
guestbook-x405a 1/1 Running 0 2m
redis-master-xx4uv 1/1 Running 0 23m
redis-slave-b6wj4 1/1 Running 0 6m
redis-slave-iai40 1/1 Running 0 6m
...
```
Result: You see a single Redis master, two Redis slaves, and three guestbook pods.
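If any of the guestbook pods is stuck in a non-`Running` state, its logs are usually the quickest way to find out why. For example, substituting one of the pod names from your own `kubectl get pods` output:
```console
$ kubectl logs guestbook-3crgn
```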
### Step Six: Create the guestbook service <a id="step-six"></a>
Just like the others, we create a service to group the guestbook pods, but this time, to make the guestbook front-end externally visible, we specify `"type": "LoadBalancer"`.
1. Use the [guestbook-service.json](guestbook-service.json) file to create the guestbook service by running the `kubectl create -f` *`filename`* command:
```console
$ kubectl create -f examples/guestbook-go/guestbook-service.json
```
2. To verify that the guestbook service is up, list the services you created in the cluster with the `kubectl get services` command:
```console
$ kubectl get services
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
guestbook 10.0.217.218 146.148.81.8 3000/TCP app=guestbook 1h
redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
redis-slave 10.0.21.92 <none> 6379/TCP app=redis,role=slave 1h
...
```
Result: The service is created with label `app=guestbook`.
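On cloud providers, the external IP for a `LoadBalancer` service can take a minute or two to be provisioned. If it is not populated yet, you can watch the service until the `EXTERNAL_IP` column fills in, for example:
```console
$ kubectl get services guestbook -w
```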
### Step Seven: View the guestbook <a id="step-seven"></a>
You can now play with the guestbook that you just created by opening it in a browser (it might take a few moments for the guestbook to come up).
* **Local Host:**
If you are running Kubernetes locally, to view the guestbook, navigate to `http://localhost:3000` in your browser.
* **Remote Host:**
1. To view the guestbook on a remote host, locate the external IP of the load balancer in the **EXTERNAL_IP** column of the `kubectl get services` output. In our example, the cluster IP address is `10.0.217.218` and the external IP address is `146.148.81.8` (*Note: you might need to scroll right to see the **EXTERNAL_IP** column*).
2. Append port `3000` to the IP address (for example `http://146.148.81.8:3000`), and then navigate to that address in your browser.
Result: The guestbook displays in your browser:
![Guestbook](guestbook-page.png)
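You can also confirm from the command line that the front end is serving traffic by requesting it with `curl`, using the external IP and port from the previous steps; the response should be the guestbook's HTML page:
```console
$ curl http://146.148.81.8:3000/
```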
**Further Reading:**
If you're using Google Compute Engine, see the details about limiting traffic to specific sources at [Google Compute Engine firewall documentation][gce-firewall-docs].
[cloud-console]: https://console.developer.google.com
[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
### Step Eight: Cleanup <a id="step-eight"></a>
After you're done playing with the guestbook, you can clean up by deleting the guestbook service and removing the associated resources that were created, including load balancers, forwarding rules, target pools, and Kubernetes replication controllers and services.
Delete all the resources by running the following `kubectl delete -f` *`filename`* command:
```console
$ kubectl delete -f examples/guestbook-go
guestbook-controller
guestbook
redis-master-controller
redis-master
redis-slave-controller
redis-slave
```
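To confirm that everything was removed, you can list the remaining replication controllers and services; in a cluster used only for this example, just the built-in `kubernetes` service should remain:
```console
$ kubectl get rc,services
```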
Tip: To turn down your Kubernetes cluster, follow the corresponding instructions in the version of the
[Getting Started Guides](../../docs/getting-started-guides/) that you previously used to create your cluster.
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/guestbook-go/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,37 @@
{
"kind":"ReplicationController",
"apiVersion":"v1",
"metadata":{
"name":"guestbook",
"labels":{
"app":"guestbook"
}
},
"spec":{
"replicas":3,
"selector":{
"app":"guestbook"
},
"template":{
"metadata":{
"labels":{
"app":"guestbook"
}
},
"spec":{
"containers":[
{
"name":"guestbook",
"image":"gcr.io/google_containers/guestbook:v3",
"ports":[
{
"name":"http-server",
"containerPort":3000
}
]
}
]
}
}
}
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 39 KiB

View file

@ -0,0 +1,22 @@
{
"kind":"Service",
"apiVersion":"v1",
"metadata":{
"name":"guestbook",
"labels":{
"app":"guestbook"
}
},
"spec":{
"ports": [
{
"port":3000,
"targetPort":"http-server"
}
],
"selector":{
"app":"guestbook"
},
"type": "LoadBalancer"
}
}

View file

@ -0,0 +1,40 @@
{
"kind":"ReplicationController",
"apiVersion":"v1",
"metadata":{
"name":"redis-master",
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"replicas":1,
"selector":{
"app":"redis",
"role":"master"
},
"template":{
"metadata":{
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"containers":[
{
"name":"redis-master",
"image":"redis:2.8.23",
"ports":[
{
"name":"redis-server",
"containerPort":6379
}
]
}
]
}
}
}
}

View file

@ -0,0 +1,23 @@
{
"kind":"Service",
"apiVersion":"v1",
"metadata":{
"name":"redis-master",
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"ports": [
{
"port":6379,
"targetPort":"redis-server"
}
],
"selector":{
"app":"redis",
"role":"master"
}
}
}

View file

@ -0,0 +1,40 @@
{
"kind":"ReplicationController",
"apiVersion":"v1",
"metadata":{
"name":"redis-slave",
"labels":{
"app":"redis",
"role":"slave"
}
},
"spec":{
"replicas":2,
"selector":{
"app":"redis",
"role":"slave"
},
"template":{
"metadata":{
"labels":{
"app":"redis",
"role":"slave"
}
},
"spec":{
"containers":[
{
"name":"redis-slave",
"image":"kubernetes/redis-slave:v2",
"ports":[
{
"name":"redis-server",
"containerPort":6379
}
]
}
]
}
}
}
}

View file

@ -0,0 +1,23 @@
{
"kind":"Service",
"apiVersion":"v1",
"metadata":{
"name":"redis-slave",
"labels":{
"app":"redis",
"role":"slave"
}
},
"spec":{
"ports": [
{
"port":6379,
"targetPort":"redis-server"
}
],
"selector":{
"app":"redis",
"role":"slave"
}
}
}

702
vendor/k8s.io/kubernetes/examples/guestbook/README.md generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,60 @@
apiVersion: v1
kind: Service
metadata:
name: frontend
labels:
app: guestbook
tier: frontend
spec:
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer
ports:
# the port that this service should serve on
- port: 80
selector:
app: guestbook
tier: frontend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: frontend
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: guestbook
# tier: frontend
spec:
# this replicas value is default
# modify it according to your case
replicas: 3
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# tier: frontend
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: gcr.io/google-samples/gb-frontend:v4
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below.
# value: env
ports:
- containerPort: 80

View file

@ -0,0 +1,179 @@
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
tier: backend
role: master
spec:
ports:
# the port that this service should serve on
- port: 6379
targetPort: 6379
selector:
app: redis
tier: backend
role: master
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-master
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: redis
# role: master
# tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 1
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# role: master
# tier: backend
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: gcr.io/google_containers/redis:e2e # or just image: redis
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
tier: backend
role: slave
spec:
ports:
# the port that this service should serve on
- port: 6379
selector:
app: redis
tier: backend
role: slave
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-slave
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: redis
# role: slave
# tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 2
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# role: slave
# tier: backend
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: gcr.io/google_samples/gb-redisslave:v1
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below.
# value: env
ports:
- containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
name: frontend
labels:
app: guestbook
tier: frontend
spec:
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer
ports:
# the port that this service should serve on
- port: 80
selector:
app: guestbook
tier: frontend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: frontend
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: guestbook
# tier: frontend
spec:
# this replicas value is default
# modify it according to your case
replicas: 3
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# tier: frontend
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: gcr.io/google-samples/gb-frontend:v4
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below.
# value: env
ports:
- containerPort: 80

View file

@ -0,0 +1,62 @@
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
role: slave
tier: backend
spec:
ports:
# the port that this service should serve on
- port: 6379
selector:
app: redis
role: slave
tier: backend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-slave
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: redis
# role: slave
# tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 2
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# role: slave
# tier: backend
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: gcr.io/google_samples/gb-redisslave:v1
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below.
# value: env
ports:
- containerPort: 6379

View file

@ -0,0 +1,42 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: frontend
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: guestbook
# tier: frontend
spec:
# this replicas value is default
# modify it according to your case
replicas: 3
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# tier: frontend
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: gcr.io/google-samples/gb-frontend:v4
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below.
# value: env
ports:
- containerPort: 80

View file

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: frontend
labels:
app: guestbook
tier: frontend
spec:
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer
ports:
# the port that this service should serve on
- port: 80
selector:
app: guestbook
tier: frontend

View file

@ -0,0 +1,41 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: frontend
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: guestbook
# tier: frontend
spec:
# this replicas value is default
# modify it according to your case
replicas: 3
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# app: guestbook
# tier: frontend
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: gcr.io/google_samples/gb-frontend:v4
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below.
# value: env
ports:
- containerPort: 80

View file

@ -0,0 +1,36 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: redis-master
# these labels can be applied automatically
# from the labels in the pod template if not set
labels:
app: redis
role: master
tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 1
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# app: guestbook
# role: master
# tier: backend
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: gcr.io/google_containers/redis:e2e # or just image: redis
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379

View file

@ -0,0 +1,44 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: redis-slave
# these labels can be applied automatically
# from the labels in the pod template if not set
labels:
app: redis
role: slave
tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 2
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# app: guestbook
# role: slave
# tier: backend
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: gcr.io/google_samples/gb-redisslave:v1
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below.
# value: env
ports:
- containerPort: 6379

View file

@ -0,0 +1,31 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM php:5-apache
RUN apt-get update
RUN apt-get install -y php-pear
RUN pear channel-discover pear.nrk.io
RUN pear install nrk/Predis
# If the container's stdio is connected to systemd-journald,
# /proc/self/fd/{1,2} are Unix sockets and apache will not be able to open()
# them. Use "cat" to write directly to the already opened fds without opening
# them again.
RUN sed -i 's#ErrorLog /proc/self/fd/2#ErrorLog "|$/bin/cat 1>\&2"#' /etc/apache2/apache2.conf
RUN sed -i 's#CustomLog /proc/self/fd/1 combined#CustomLog "|/bin/cat" combined#' /etc/apache2/apache2.conf
ADD guestbook.php /var/www/html/guestbook.php
ADD controllers.js /var/www/html/controllers.js
ADD index.html /var/www/html/index.html

View file

@ -0,0 +1,29 @@
var redisApp = angular.module('redis', ['ui.bootstrap']);
/**
* Constructor
*/
function RedisController() {}
RedisController.prototype.onRedis = function() {
this.scope_.messages.push(this.scope_.msg);
this.scope_.msg = "";
var value = this.scope_.messages.join();
this.http_.get("guestbook.php?cmd=set&key=messages&value=" + value)
.success(angular.bind(this, function(data) {
this.scope_.redisResponse = "Updated.";
}));
};
redisApp.controller('RedisCtrl', function ($scope, $http, $location) {
$scope.controller = new RedisController();
$scope.controller.scope_ = $scope;
$scope.controller.location_ = $location;
$scope.controller.http_ = $http;
$scope.controller.http_.get("guestbook.php?cmd=get&key=messages")
.success(function(data) {
console.log(data);
$scope.messages = data.data.split(",");
});
});

View file

@ -0,0 +1,41 @@
<?php
error_reporting(E_ALL);
ini_set('display_errors', 1);
require 'Predis/Autoloader.php';
Predis\Autoloader::register();
if (isset($_GET['cmd']) === true) {
$host = 'redis-master';
if (getenv('GET_HOSTS_FROM') == 'env') {
$host = getenv('REDIS_MASTER_SERVICE_HOST');
}
header('Content-Type: application/json');
if ($_GET['cmd'] == 'set') {
$client = new Predis\Client([
'scheme' => 'tcp',
'host' => $host,
'port' => 6379,
]);
$client->set($_GET['key'], $_GET['value']);
print('{"message": "Updated"}');
} else {
$host = 'redis-slave';
if (getenv('GET_HOSTS_FROM') == 'env') {
$host = getenv('REDIS_SLAVE_SERVICE_HOST');
}
$client = new Predis\Client([
'scheme' => 'tcp',
'host' => $host,
'port' => 6379,
]);
$value = $client->get($_GET['key']);
print('{"data": "' . $value . '"}');
}
} else {
phpinfo();
} ?>

View file

@ -0,0 +1,25 @@
<html ng-app="redis">
<head>
<title>Guestbook</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.1.1/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.12/angular.min.js"></script>
<script src="controllers.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/angular-ui-bootstrap/0.13.0/ui-bootstrap-tpls.js"></script>
</head>
<body ng-controller="RedisCtrl">
<div style="width: 50%; margin-left: 20px">
<h2>Guestbook</h2>
<form>
<fieldset>
<input ng-model="msg" placeholder="Messages" class="form-control" type="text" name="input"><br>
<button type="button" class="btn btn-primary" ng-click="controller.onRedis()">Submit</button>
</fieldset>
</form>
<div>
<div ng-repeat="msg in messages track by $index">
{{msg}}
</div>
</div>
</div>
</body>
</html>

View file

@ -0,0 +1,37 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-master
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: redis
# role: master
# tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 1
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# role: master
# tier: backend
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: gcr.io/google_containers/redis:e2e # or just image: redis
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379

View file

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
role: master
tier: backend
spec:
ports:
# the port that this service should serve on
- port: 6379
targetPort: 6379
selector:
app: redis
role: master
tier: backend

View file

@ -0,0 +1,45 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-slave
# these labels can be applied automatically
# from the labels in the pod template if not set
# labels:
# app: redis
# role: slave
# tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 2
# selector can be applied automatically
# from the labels in the pod template if not set
# selector:
# matchLabels:
# app: guestbook
# role: slave
# tier: backend
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: gcr.io/google_samples/gb-redisslave:v1
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below.
# value: env
ports:
- containerPort: 6379

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
role: slave
tier: backend
spec:
ports:
# the port that this service should serve on
- port: 6379
selector:
app: redis
role: slave
tier: backend

View file

@ -0,0 +1,21 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM redis
ADD run.sh /run.sh
RUN chmod a+x /run.sh
CMD /run.sh

View file

@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ ${GET_HOSTS_FROM:-dns} == "env" ]]; then
redis-server --slaveof ${REDIS_MASTER_SERVICE_HOST} 6379
else
redis-server --slaveof redis-master 6379
fi

90
vendor/k8s.io/kubernetes/examples/guidelines.md generated vendored Normal file
View file

@ -0,0 +1,90 @@
# Example Guidelines
## An Example Is
An example demonstrates running an application/framework/workload on
Kubernetes in a meaningful way. It is educational and informative.
Examples are not:
* Full app deployments, ready to use, with no explanation. These
belong either
[here](https://github.com/kubernetes/application-dm-templates) or in
something like [Helm](https://github.com/helm/charts).
* Simple toys to show how to use a Kubernetes feature. These belong in
the [user guide](../docs/user-guide/).
* Demos that follow a script to show a Kubernetes feature in
action. Example: killing a node to demonstrate controller
self-healing.
* A tutorial which guides the user through multiple progressively more
complex deployments to arrive at the final solution. An example
should just demonstrate how to set up the correct deployment.
## An Example Includes
### Up front
* Has a "this is what you'll learn" section.
* Has a Table of Contents.
* Has a section that brings up the app in the fewest number of
commands (TL;DR / quickstart), without cloning the repo (kubectl
apply -f http://...).
* Points to documentation of prerequisites.
* [Create a cluster](../docs/getting-started-guides/) (e.g., single-node docker).
* [Setup kubectl](../docs/user-guide/prereqs.md).
* etc.
* Should specify which release of Kubernetes is required and any other
prerequisites, such as DNS, a cloudprovider with PV provisioning, a
cloudprovider with external load balancers, etc.
* Point to general documentation about alternatives for those
mechanisms rather than present the alternatives in each example.
* Tries to balance between using new features, and being
compatible across environments.
### Throughout
* Should point to documentation on first mention:
[kubectl](../docs/user-guide/kubectl-overview.md),
[pods](../docs/user-guide/pods.md),
[services](../docs/user-guide/services.md),
[deployments](../docs/user-guide/deployments.md),
[replication controllers](../docs/user-guide/replication-controller.md),
[jobs](../docs/user-guide/jobs.md),
[labels](../docs/user-guide/labels.md),
[persistent volumes](../docs/user-guide/persistent-volumes.md),
etc.
* Most examples should be cloudprovider-independent (e.g., using PVCs, not PDs).
* Other examples with cloudprovider-specific bits could be somewhere else.
* Actually show the app working -- console output and/or screenshots.
* Ascii animations and screencasts are recommended.
* Follows [config best practices](../docs/user-guide/config-best-practices.md).
* Shouldn't duplicate the [thorough walk-through](../docs/user-guide/#thorough-walkthrough).
* Docker images are pre-built, and source is contained in a subfolder.
* Source is the Dockerfile and any custom files needed beyond the
upstream app being packaged.
* Images are pushed to `gcr.io/google-samples`. Contact @jeffmendoza
to have an image pushed.
* Images are tagged with a version (not latest) that is referenced
in the example config.
* Only use the code highlighting types
[supported by Rouge](https://github.com/jneen/rouge/wiki/list-of-supported-languages-and-lexers),
as this is what GitHub Pages uses.
* Commands to be copied use the `shell` syntax highlighting type, and
do not include any kind of prompt.
* Example output is in a separate block quote to distinguish it from
the command (which doesn't have a prompt).
* When providing an example command or config for which the user is
expected to substitute text with something specific to them, use
angle brackets: `<IDENTIFIER>` for the text to be substituted.
* Use `kubectl` instead of `cluster/kubectl.sh` for example cli
commands.
### At the end
* Should have a section suggesting what to look at next, both in terms
of "additional resources" and "what example to look at next".
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/guidelines.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,25 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM nginx
MAINTAINER Mengqi Yu <mengqiy@google.com>
COPY index2.html /usr/share/nginx/html/index2.html
RUN chmod +r /usr/share/nginx/html/index2.html
COPY auto-reload-nginx.sh /home/auto-reload-nginx.sh
RUN chmod +x /home/auto-reload-nginx.sh
# install inotify
RUN apt-get update && apt-get install -y inotify-tools

38
vendor/k8s.io/kubernetes/examples/https-nginx/Makefile generated vendored Normal file
View file

@ -0,0 +1,38 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
all:
TAG = 1.0
PREFIX = bprashanth/nginxhttps
KEY = /tmp/nginx.key
CERT = /tmp/nginx.crt
SECRET = /tmp/secret.json
keys:
# The CName used here is specific to the service specified in nginx-app.yaml.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $(KEY) -out $(CERT) -subj "/CN=nginxsvc/O=nginxsvc"
secret:
go run make_secret.go -crt $(CERT) -key $(KEY) > $(SECRET)
container:
docker build -t $(PREFIX):$(TAG) .
push: container
docker push $(PREFIX):$(TAG)
clean:
rm $(KEY)
rm $(CERT)

129
vendor/k8s.io/kubernetes/examples/https-nginx/README.md generated vendored Normal file
View file

@ -0,0 +1,129 @@
# Nginx https service
This example creates a basic nginx https service, useful for verifying a proof of concept, keys, secrets, configmaps, and end-to-end https service creation in Kubernetes.
It uses an [nginx server block](http://wiki.nginx.org/ServerBlockExample) to serve the index page over both http and https. It will detect changes to nginx's configuration file, `default.conf`, mounted as a configmap volume, and reload nginx automatically.
### Generate certificates
First, generate a self-signed RSA key and certificate that the server can use for TLS. This step invokes the make_secret.go script in the same directory, which uses the Kubernetes API to generate a secret JSON config in /tmp/secret.json.
```sh
$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
```
### Create a https nginx application running in a kubernetes cluster
You need a [running kubernetes cluster](../../docs/getting-started-guides/) for this to work.
Create a secret and a configmap.
```sh
$ kubectl create -f /tmp/secret.json
secret "nginxsecret" created
$ kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf
configmap "nginxconfigmap" created
```
Create a service and a replication controller using the configuration in nginx-app.yaml.
```sh
$ kubectl create -f examples/https-nginx/nginx-app.yaml
You have exposed your service on an external port on all nodes in your
cluster. If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (tcp:32211,tcp:30028) to serve traffic.
...
service "nginxsvc" created
replicationcontroller "my-nginx" created
```
Then, find the node port that Kubernetes is using for http and https traffic.
```sh
$ kubectl get service nginxsvc -o json
...
{
"name": "http",
"protocol": "TCP",
"port": 80,
"targetPort": 80,
"nodePort": 32211
},
{
"name": "https",
"protocol": "TCP",
"port": 443,
"targetPort": 443,
"nodePort": 30028
}
...
```
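If you only want the node port numbers themselves, a `jsonpath` query is a convenient shortcut (a sketch; the field path assumes the service shown above):
```sh
$ kubectl get service nginxsvc -o jsonpath='{.spec.ports[*].nodePort}'
```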
If you are using Kubernetes on a cloud provider, you may need to create cloud firewall rules to serve traffic.
If you are using GCE or GKE, you can use the following commands to add firewall rules.
```sh
$ gcloud compute firewall-rules create allow-nginx-http --allow tcp:32211 --description "Incoming http allowed."
Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-http].
NAME NETWORK SRC_RANGES RULES SRC_TAGS TARGET_TAGS
allow-nginx-http default 0.0.0.0/0 tcp:32211
$ gcloud compute firewall-rules create allow-nginx-https --allow tcp:30028 --description "Incoming https allowed."
Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-https].
NAME NETWORK SRC_RANGES RULES SRC_TAGS TARGET_TAGS
allow-nginx-https default 0.0.0.0/0 tcp:30028
```
Find your nodes' IPs.
```sh
$ kubectl get nodes -o json | grep ExternalIP -A 2
"type": "ExternalIP",
"address": "104.198.1.26"
}
--
"type": "ExternalIP",
"address": "104.198.12.158"
}
--
"type": "ExternalIP",
"address": "104.198.11.137"
}
```
Now your service is up. You can either use your browser or type the following commands.
```sh
$ curl https://<your-node-ip>:<your-port> -k
$ curl https://104.198.1.26:30028 -k
...
<title>Welcome to nginx!</title>
...
```
Then we will update the configmap by changing `index.html` to `index2.html`.
```sh
kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf -o yaml --dry-run\
| sed 's/index.html/index2.html/g' | kubectl apply -f -
configmap "nginxconfigmap" configured
```
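Before testing, you can optionally confirm that the updated configuration landed in the ConfigMap, for example:
```sh
$ kubectl get configmap nginxconfigmap -o yaml | grep index
```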
Wait a few seconds to let the change propagate. Now you should be able to either use your browser or type the following commands to verify that Nginx has been reloaded with the new configuration.
```sh
$ curl https://<your-node-ip>:<your-port> -k
$ curl https://104.198.1.26:30028 -k
...
<title>Nginx reloaded!</title>
...
```
For more information on how to run this in a kubernetes cluster, please see the [user-guide](../../docs/user-guide/connecting-applications.md).
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/https-nginx/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,30 @@
#!/bin/sh
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
nginx "$@"
oldcksum=`cksum /etc/nginx/conf.d/default.conf`
inotifywait -e modify,move,create,delete -mr --timefmt '%d/%m/%y %H:%M' --format '%T' \
/etc/nginx/conf.d/ | while read date time; do
newcksum=`cksum /etc/nginx/conf.d/default.conf`
if [ "$newcksum" != "$oldcksum" ]; then
echo "At ${time} on ${date}, config file update detected."
oldcksum=$newcksum
nginx -s reload
fi
done

View file

@ -0,0 +1,17 @@
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
listen 443 ssl;
root /usr/share/nginx/html;
index index.html;
server_name localhost;
ssl_certificate /etc/nginx/ssl/nginx.crt;
ssl_certificate_key /etc/nginx/ssl/nginx.key;
location / {
try_files $uri $uri/ =404;
}
}

View file

@ -0,0 +1,28 @@
<!DOCTYPE html>
<html>
<head>
<title>Nginx reloaded!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Nginx has been reloaded!</h1>
<p>If you see this page, the nginx web server has been automatically reloaded, since the config file has been updated using <a href="https://github.com/kubernetes/kubernetes">Kubernetes</a>.</p>
<p>For online documentation and support please refer to
<a href="http://kubernetes.io/">kubernetes.io</a>.<br/></p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

View file

@ -0,0 +1,70 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A small script that converts the given open ssl public/private keys to
// a secret that it writes to stdout as json. Most common use case is to
// create a secret from self signed certificates used to authenticate with
// a devserver. Usage: go run make_secret.go -crt ca.crt -key priv.key > secret.json
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/runtime"
// This installs the legacy v1 API
_ "k8s.io/kubernetes/pkg/api/install"
)
// TODO:
// Add a -o flag that writes to the specified destination file.
// Teach the script to create crt and key if -crt and -key aren't specified.
var (
crt = flag.String("crt", "", "path to nginx certificates.")
key = flag.String("key", "", "path to nginx private key.")
)
func read(file string) []byte {
b, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("Cannot read file %v, %v", file, err)
}
return b
}
func main() {
flag.Parse()
if *crt == "" || *key == "" {
log.Fatalf("Need to specify -crt -key and -template")
}
nginxCrt := read(*crt)
nginxKey := read(*key)
secret := &api.Secret{
ObjectMeta: api.ObjectMeta{
Name: "nginxsecret",
},
Data: map[string][]byte{
"nginx.crt": nginxCrt,
"nginx.key": nginxKey,
},
}
fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), secret))
}

View file

@ -0,0 +1,54 @@
apiVersion: v1
kind: Service
metadata:
name: nginxsvc
labels:
app: nginx
spec:
type: NodePort
ports:
- port: 80
protocol: TCP
name: http
- port: 443
protocol: TCP
name: https
selector:
app: nginx
---
apiVersion: v1
kind: ReplicationController
metadata:
name: my-nginx
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
volumes:
- name: secret-volume
secret:
secretName: nginxsecret
- name: configmap-volume
configMap:
name: nginxconfigmap
containers:
- name: nginxhttps
image: ymqytw/nginxhttps:1.5
command: ["/home/auto-reload-nginx.sh"]
ports:
- containerPort: 443
- containerPort: 80
livenessProbe:
httpGet:
path: /index.html
port: 80
initialDelaySeconds: 30
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/nginx/ssl
name: secret-volume
- mountPath: /etc/nginx/conf.d
name: configmap-volume

134
vendor/k8s.io/kubernetes/examples/javaee/README.md generated vendored Normal file
View file

@ -0,0 +1,134 @@
## Java EE Application using WildFly and MySQL
The following document describes the deployment of a Java EE application using [WildFly](http://wildfly.org) application server and MySQL database server on Kubernetes. The sample application source code is at: https://github.com/javaee-samples/javaee7-simple-sample.
### Prerequisites
https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md
### Start MySQL Pod
In Kubernetes a [_Pod_](../../docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. It's a collocated group of containers that share an IP and storage volume.
Here is the config for MySQL pod: [mysql-pod.yaml](mysql-pod.yaml)
<!-- BEGIN MUNGE: mysql-pod.yaml -->
<!-- END MUNGE: EXAMPLE -->
Create the MySQL pod:
```sh
kubectl create -f examples/javaee/mysql-pod.yaml
```
Check status of the pod:
```sh
kubectl get -w po
NAME READY STATUS RESTARTS AGE
mysql-pod 0/1 Pending 0 4s
NAME READY STATUS RESTARTS AGE
mysql-pod 0/1 Running 0 44s
mysql-pod 1/1 Running 0 44s
```
Wait for the status to reach `1/1` and `Running`.
### Start MySQL Service
We are creating a [_Service_](../../docs/user-guide/services.md) to expose the TCP port of the MySQL server. A Service distributes traffic across a set of Pods. The order in which the Service and the Pods it targets are created does not matter; however, the Service needs to be started before any other Pods that consume it are started, so that its environment variables are available to them.
In this application, we will use a Kubernetes Service to provide a discoverable endpoint for MySQL in the cluster. The MySQL Service targets pods with the labels `name: mysql-pod` and `context: docker-k8s-lab`.
Here is definition of the MySQL service: [mysql-service.yaml](mysql-service.yaml)
<!-- BEGIN MUNGE: mysql-service.yaml -->
<!-- END MUNGE: EXAMPLE -->
Create this service:
```sh
kubectl create -f examples/javaee/mysql-service.yaml
```
Get status of the service:
```sh
kubectl get -w svc
NAME LABELS SELECTOR IP(S) PORT(S)
kubernetes component=apiserver,provider=kubernetes <none> 10.247.0.1 443/TCP
mysql-service context=docker-k8s-lab,name=mysql-pod context=docker-k8s-lab,name=mysql-pod 10.247.63.43 3306/TCP
```
If multiple services are running, then it can be narrowed by specifying labels:
```sh
kubectl get -w po -l context=docker-k8s-lab,name=mysql-pod
NAME READY STATUS RESTARTS AGE
mysql-pod 1/1 Running 0 4m
```
These are also the selector labels used by the service to target pods.
When a Pod is run on a node, the kubelet adds a set of environment variables for each active Service. It supports both Docker links compatible variables and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores.
Our service name is `mysql-service`, and so the `MYSQL_SERVICE_SERVICE_HOST` and `MYSQL_SERVICE_SERVICE_PORT` variables are available to other pods. These host and port variables are then used to create the JDBC resource in WildFly.
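Once the WildFly pod from the next section is running, you can optionally confirm that these variables were injected into its environment (the pod name below is the one from the output later in this document; yours will differ):
```sh
kubectl exec wildfly-rc-w2kk5 -- env | grep MYSQL_SERVICE
```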
### Start WildFly Replication Controller
WildFly is a lightweight Java EE 7 compliant application server. It is wrapped in a Replication Controller and used as the Java EE runtime.
In Kubernetes a [_Replication Controller_](../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_, it has a selector query which identifies the members of its set. Unlike a Service, it also has a desired number of replicas, and it will create or delete pods to ensure that the number of pods matches up with its desired state.
Here is the definition of the WildFly replication controller: [wildfly-rc.yaml](wildfly-rc.yaml).
<!-- BEGIN MUNGE: wildfly-rc.yaml -->
<!-- END MUNGE: EXAMPLE -->
Create this controller:
```sh
kubectl create -f examples/javaee/wildfly-rc.yaml
```
Check status of the pod inside replication controller:
```sh
kubectl get po
NAME READY STATUS RESTARTS AGE
mysql-pod 1/1 Running 0 1h
wildfly-rc-w2kk5 1/1 Running 0 6m
```
### Access the application
Get IP address of the pod:
```sh
kubectl get -o template po wildfly-rc-w2kk5 --template={{.status.podIP}}
10.246.1.23
```
Log in to node and access the application:
```sh
vagrant ssh node-1
Last login: Thu Jul 16 00:24:36 2015 from 10.0.2.2
[vagrant@kubernetes-node-1 ~]$ curl http://10.246.1.23:8080/employees/resources/employees/
<?xml version="1.0" encoding="UTF-8" standalone="yes"?><collection><employee><id>1</id><name>Penny</name></employee><employee><id>2</id><name>Sheldon</name></employee><employee><id>3</id><name>Amy</name></employee><employee><id>4</id><name>Leonard</name></employee><employee><id>5</id><name>Bernadette</name></employee><employee><id>6</id><name>Raj</name></employee><employee><id>7</id><name>Howard</name></employee><employee><id>8</id><name>Priya</name></employee></collection>
```
### Delete resources
All resources created in this application can be deleted:
```sh
kubectl delete -f examples/javaee/mysql-pod.yaml
kubectl delete -f examples/javaee/mysql-service.yaml
kubectl delete -f examples/javaee/wildfly-rc.yaml
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/javaee/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
name: mysql-pod
labels:
name: mysql-pod
context: docker-k8s-lab
spec:
containers:
-
name: mysql
image: mysql:latest
env:
-
name: "MYSQL_USER"
value: "mysql"
-
name: "MYSQL_PASSWORD"
value: "mysql"
-
name: "MYSQL_DATABASE"
value: "sample"
-
name: "MYSQL_ROOT_PASSWORD"
value: "supersecret"
ports:
-
containerPort: 3306

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: mysql-service
labels:
name: mysql-pod
context: docker-k8s-lab
spec:
ports:
# the port that this service should serve on
- port: 3306
# label keys and values that must match in order to receive traffic for this service
selector:
name: mysql-pod
context: docker-k8s-lab

View file

@ -0,0 +1,19 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: wildfly-rc
labels:
name: wildfly
context: docker-k8s-lab
spec:
replicas: 1
template:
metadata:
labels:
name: wildfly
spec:
containers:
- name: wildfly-rc-pod
image: arungupta/wildfly-mysql-javaee7:k8s
ports:
- containerPort: 8080

View file

@ -0,0 +1,185 @@
## Java Web Application with Tomcat and Sidecar Container
The following document describes the deployment of a Java Web application using Tomcat. Instead of packaging the `war` file inside the Tomcat image or mounting the `war` as a volume, we use a sidecar container as the `war` file provider.
### Prerequisites
https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md
### Overview
This sidecar mode brings a new workflow for Java users:
![](workflow.png?raw=true "Workflow")
As you can see, a user can create a `sample:v2` container as a sidecar to "provide" the war file to Tomcat by copying it to the shared `emptyDir` volume. The Pod makes sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management is completely separated from web server management.
For example, if you are going to change the configurations of your Tomcat:
```console
$ docker exec -it <tomcat_container_id> /bin/bash
# make some change, and then commit it to a new image
$ docker commit <tomcat_container_id> mytomcat:7.0-dev
```
Done! The new Tomcat image **will not** interfere with your `sample.war` file. You can reuse the same Tomcat image with many different war container images for many different apps, without having to build a separate image for each one.
This also means that rolling out a new Tomcat, for a security patch or anything else, doesn't require rebuilding N different images.
**Why not put my `sample.war` in a host dir and mount it to tomcat container?**
You would have to **manage the volumes** yourself in that case: for example, when the pod is restarted or rescheduled on another node, your content is not ready on that host.
Generally, you would have to set up a distributed file system volume (NFS at the least) to solve this (if you do not have a GCE PD volume), but for this use case that is unnecessary.
### How To Set this Up
In Kubernetes a [_Pod_](../../docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. It's a collocated group of containers that share an IP and storage volume.
Here is the config [javaweb.yaml](javaweb.yaml) for Java Web pod:
NOTE: you should define `war` container **first** as it is the "provider".
<!-- BEGIN MUNGE: javaweb.yaml -->
```
apiVersion: v1
kind: Pod
metadata:
name: javaweb
spec:
containers:
- image: resouer/sample:v1
name: war
volumeMounts:
- mountPath: /app
name: app-volume
- image: resouer/mytomcat:7.0
name: tomcat
command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
volumeMounts:
- mountPath: /root/apache-tomcat-7.0.42-v2/webapps
name: app-volume
ports:
- containerPort: 8080
hostPort: 8001
volumes:
- name: app-volume
emptyDir: {}
```
<!-- END MUNGE: EXAMPLE -->
The only magic here is the `resouer/sample:v1` image:
```
FROM busybox:latest
ADD sample.war sample.war
CMD "sh" "mv.sh"
```
And the contents of `mv.sh` is:
```sh
cp /sample.war /app
tail -f /dev/null
```
#### Explanation
1. 'war' container only contains the `war` file of your app
2. 'war' container's CMD tries to copy `sample.war` to the `emptyDir` volume path
3. The last line, `tail -f`, is just used to keep the container running, as a Replication Controller does not support one-off tasks
4. 'tomcat' container will load the `sample.war` from volume path
What's more, if you don't want to include a built-in `mv.sh` script in the `war` container, you can use a Pod lifecycle handler to do the copy work; here's an example, [javaweb-2.yaml](javaweb-2.yaml):
<!-- BEGIN MUNGE: javaweb-2.yaml -->
```
apiVersion: v1
kind: Pod
metadata:
name: javaweb-2
spec:
containers:
- image: resouer/sample:v2
name: war
lifecycle:
postStart:
exec:
command:
- "cp"
- "/sample.war"
- "/app"
volumeMounts:
- mountPath: /app
name: app-volume
- image: resouer/mytomcat:7.0
name: tomcat
command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
volumeMounts:
- mountPath: /root/apache-tomcat-7.0.42-v2/webapps
name: app-volume
ports:
- containerPort: 8080
hostPort: 8001
volumes:
- name: app-volume
emptyDir: {}
```
<!-- END MUNGE: EXAMPLE -->
And the `resouer/sample:v2` Dockerfile is quite simple:
```
FROM busybox:latest
ADD sample.war sample.war
CMD "tail" "-f" "/dev/null"
```
#### Explanation
1. 'war' container only contains the `war` file of your app
2. 'war' container's CMD uses `tail -f` to hold the container, nothing more
3. The `postStart` lifecycle handler will do `cp` after the `war` container is started
4. Again 'tomcat' container will load the `sample.war` from volume path
Done! Now your `war` container contains nothing except `sample.war`, clean enough.
### Test It Out
Create the Java web pod:
```console
$ kubectl create -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml
```
Check status of the pod:
```console
$ kubectl get -w po
NAME READY STATUS RESTARTS AGE
javaweb-2 2/2 Running 0 7s
```
Wait for the status to reach `2/2` and `Running`. Then you can visit the "Hello, World" page at `http://localhost:8001/sample/index.html`.
You can also test `javaweb.yaml` in the same way.
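If you want to double-check that the sidecar actually delivered the `war` file into the shared volume, you can list Tomcat's `webapps` directory inside the pod (the container name and path below are the ones used in `javaweb-2.yaml`):
```console
$ kubectl exec javaweb-2 -c tomcat -- ls /root/apache-tomcat-7.0.42-v2/webapps
```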
### Delete Resources
All resources created in this application can be deleted:
```console
$ kubectl delete -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/javaweb-tomcat-sidecar/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,31 @@
apiVersion: v1
kind: Pod
metadata:
name: javaweb-2
spec:
containers:
- image: resouer/sample:v2
name: war
lifecycle:
postStart:
exec:
command:
- "cp"
- "/sample.war"
- "/app"
volumeMounts:
- mountPath: /app
name: app-volume
- image: resouer/mytomcat:7.0
name: tomcat
command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
volumeMounts:
- mountPath: /root/apache-tomcat-7.0.42-v2/webapps
name: app-volume
ports:
- containerPort: 8080
hostPort: 8001
volumes:
- name: app-volume
emptyDir: {}

View file

@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
name: javaweb
spec:
containers:
- image: resouer/sample:v1
name: war
volumeMounts:
- mountPath: /app
name: app-volume
- image: resouer/mytomcat:7.0
name: tomcat
command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
volumeMounts:
- mountPath: /root/apache-tomcat-7.0.42-v2/webapps
name: app-volume
ports:
- containerPort: 8080
hostPort: 8001
volumes:
- name: app-volume
emptyDir: {}

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

View file

@ -0,0 +1,7 @@
This file has moved to: http://kubernetes.io/docs/user-guide/jobs/
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/job/expansions/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View file

@ -0,0 +1,7 @@
This file has moved to: http://kubernetes.io/docs/user-guide/jobs/
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/job/work-queue-1/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

Some files were not shown because too many files have changed in this diff.