forked from barak/tarpoon

Add glide.yaml and vendor deps

This commit is contained in:
parent db918f12ad
commit 5b3d5e81bd

18880 changed files with 5166045 additions and 1 deletion
vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md (new file, 839 lines, generated/vendored; diff suppressed because it is too large)
vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-controller.yaml (new file, 57 lines, generated/vendored)

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: cassandra
  # The labels will be applied automatically
  # from the labels in the pod template, if not set
  # labels:
  #   app: cassandra
spec:
  replicas: 2
  # The selector will be applied automatically
  # from the labels in the pod template, if not set.
  # selector:
  #   app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
      - command:
        - /run.sh
        resources:
          limits:
            cpu: 0.5
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEED_PROVIDER
          value: "io.k8s.cassandra.KubernetesSeedProvider"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        image: gcr.io/google-samples/cassandra:v11
        name: cassandra
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        volumeMounts:
        - mountPath: /cassandra_data
          name: data
      volumes:
      - name: data
        emptyDir: {}
```
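A minimal way to exercise the controller above, assuming a running cluster with `kubectl` configured against it (a sketch for illustration, not part of the commit):

```sh
# Create the replication controller and watch its two replicas come up.
kubectl create -f cassandra-controller.yaml
kubectl get pods -l app=cassandra --watch

# Replication controllers can be resized after creation.
kubectl scale rc cassandra --replicas=3
```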
vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-daemonset.yaml (new file, 56 lines, generated/vendored)

```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    name: cassandra
  name: cassandra
spec:
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      # Filter to specific nodes:
      # nodeSelector:
      #   app: cassandra
      containers:
      - command:
        - /run.sh
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEED_PROVIDER
          value: "io.k8s.cassandra.KubernetesSeedProvider"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        image: gcr.io/google-samples/cassandra:v11
        name: cassandra
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        # If you need it it is going away in C* 4.0
        #- containerPort: 9160
        #  name: thrift
        resources:
          requests:
            cpu: 0.5
        volumeMounts:
        - mountPath: /cassandra_data
          name: data
      volumes:
      - name: data
        emptyDir: {}
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-service.yaml (new file, 12 lines, generated/vendored)

```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cassandra
  name: cassandra
spec:
  clusterIP: None
  ports:
  - port: 9042
  selector:
    app: cassandra
```
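`clusterIP: None` makes this a headless service: DNS and the endpoints API return the individual Cassandra pod IPs rather than a single virtual IP, which is what the `KubernetesSeedProvider` below queries. A quick sanity check after creating it (a sketch; assumes the `default` namespace):

```sh
kubectl create -f cassandra-service.yaml
# For a headless service, the endpoints list is the set of pod IPs.
kubectl get endpoints cassandra -o yaml
```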
vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-statefulset.yaml (new file, 82 lines, generated/vendored)

```yaml
apiVersion: "apps/v1beta1"
kind: StatefulSet
metadata:
  name: cassandra
spec:
  serviceName: cassandra
  replicas: 3
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
      - name: cassandra
        image: gcr.io/google-samples/cassandra:v11
        imagePullPolicy: Always
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        resources:
          limits:
            cpu: "500m"
            memory: 1Gi
          requests:
            cpu: "500m"
            memory: 1Gi
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEEDS
          value: "cassandra-0.cassandra.default.svc.cluster.local"
        - name: CASSANDRA_CLUSTER_NAME
          value: "K8Demo"
        - name: CASSANDRA_DC
          value: "DC1-K8Demo"
        - name: CASSANDRA_RACK
          value: "Rack1-K8Demo"
        - name: CASSANDRA_AUTO_BOOTSTRAP
          value: "false"
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        readinessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - /ready-probe.sh
          initialDelaySeconds: 15
          timeoutSeconds: 5
        # These volume mounts are persistent. They are like inline claims,
        # but not exactly because the names need to match exactly one of
        # the stateful pod volumes.
        volumeMounts:
        - name: cassandra-data
          mountPath: /cassandra_data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  # do not use these in production until ssd GCEPersistentDisk or other ssd pd
  volumeClaimTemplates:
  - metadata:
      name: cassandra-data
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
```
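A minimal bring-up sequence for the StatefulSet, assuming the headless `cassandra` service above is created first (a sketch):

```sh
kubectl create -f cassandra-service.yaml
kubectl create -f cassandra-statefulset.yaml

# Pods are created ordinally: cassandra-0, then cassandra-1, then cassandra-2.
kubectl get pods -l app=cassandra --watch

# Once the pods are ready, inspect ring membership from inside one of them.
kubectl exec cassandra-0 -- nodetool status
```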
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/Dockerfile (new file, 78 lines, generated/vendored)

```dockerfile
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM google/debian:jessie

ADD files /

ENV DI_VERSION="1.1.1" DI_SHA="dec8167091671df0dd3748a8938102479db5fffc"

RUN mv /java.list /etc/apt/sources.list.d/java.list \
  && apt-get update \
  && apt-get -qq -y install --no-install-recommends procps openjdk-8-jre-headless libjemalloc1 curl \
    localepurge \
  && curl -L https://github.com/Yelp/dumb-init/releases/download/v${DI_VERSION}/dumb-init_${DI_VERSION}_amd64 > /sbin/dumb-init \
  && echo "$DI_SHA /sbin/dumb-init" | sha1sum -c - \
  && mv /cassandra.list /etc/apt/sources.list.d/cassandra.list \
  && gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D \
  && gpg --export --armor F758CE318D77295D | apt-key add - \
  && gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 \
  && gpg --export --armor 2B5C1B00 | apt-key add - \
  && gpg --keyserver pgp.mit.edu --recv-keys 0353B12C \
  && gpg --export --armor 0353B12C | apt-key add - \
  && apt-get update \
  && apt-get -qq -y install --no-install-recommends curl cassandra localepurge \
  && chmod a+rx /run.sh /sbin/dumb-init /ready-probe.sh \
  && mkdir -p /cassandra_data/data \
  && mv /logback.xml /cassandra.yaml /etc/cassandra/ \

# Not able to run as cassandra until https://github.com/kubernetes/kubernetes/issues/2630 is resolved
#  && chown -R cassandra: /etc/cassandra /cassandra_data /run.sh /kubernetes-cassandra.jar \
#  && chmod o+w -R /etc/cassandra /cassandra_data \

  && apt-get -y purge curl localepurge \
  && apt-get clean \
  && rm -rf \
    doc \
    man \
    info \
    locale \
    /var/lib/apt/lists/* \
    /var/log/* \
    /var/cache/debconf/* \
    common-licenses \
    ~/.bashrc \
    /etc/systemd \
    /lib/lsb \
    /lib/udev \
    /usr/share/doc/ \
    /usr/share/doc-base/ \
    /usr/share/man/ \
    /tmp/*

VOLUME ["/cassandra_data"]

# 7000: intra-node communication
# 7001: TLS intra-node communication
# 7199: JMX
# 9042: CQL
# 9160: thrift service not included cause it is going away
EXPOSE 7000 7001 7199 9042

# Not able to do this until https://github.com/kubernetes/kubernetes/issues/2630 is resolved
# if you are using attached storage
# USER cassandra

CMD ["/sbin/dumb-init", "/bin/bash", "/run.sh"]
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/Makefile (new file, 34 lines, generated/vendored)

```makefile
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# build the cassandra image.

VERSION=v11
PROJECT_ID=google_samples
PROJECT=gcr.io/${PROJECT_ID}

all: build

kubernetes-cassandra.jar: ../java/* ../java/src/main/java/io/k8s/cassandra/*.java
	cd ../java && mvn clean && mvn package
	mv ../java/target/kubernetes-cassandra*.jar files/kubernetes-cassandra.jar
	cd ../java && mvn clean

build: kubernetes-cassandra.jar
	docker build -t ${PROJECT}/cassandra:${VERSION} .

push: build
	gcloud docker -- push ${PROJECT}/cassandra:${VERSION}

.PHONY: all build push
```
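The `build` target depends on `kubernetes-cassandra.jar`, so Maven compiles the seed provider before the Docker image is built. Typical usage from the `image` directory (a sketch; overriding `PROJECT_ID` on the command line is standard Make behavior, not something this Makefile documents):

```sh
# Build the jar via Maven, then the Docker image tagged ${PROJECT}/cassandra:v11.
make build

# Build and push to a different registry project.
make push PROJECT_ID=my-project
```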
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.list (new file, 2 lines, generated/vendored)

```
deb http://www.apache.org/dist/cassandra/debian 37x main
deb-src http://www.apache.org/dist/cassandra/debian 37x main
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml (new file, 990 lines, generated/vendored; diff suppressed because it is too large)
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/java.list (new file, 2 lines, generated/vendored)

```
# for jre8
deb http://http.debian.net/debian jessie-backports main
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/kubernetes-cassandra.jar (new binary file, generated/vendored; not shown)
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/logback.xml (new file, 13 lines, generated/vendored)

```xml
<?xml version="1.0"?>
<configuration scan="true">
  <jmxConfigurator/>
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
    </encoder>
  </appender>
  <root level="INFO">
    <appender-ref ref="STDOUT"/>
  </root>
  <logger name="com.thinkaurelius.thrift" level="ERROR"/>
</configuration>
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/ready-probe.sh (new file, 27 lines, generated/vendored)

```bash
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The node is ready when nodetool reports this pod's IP as UN (Up/Normal).
if [[ $(nodetool status | grep $POD_IP) == *"UN"* ]]; then
  if [[ $DEBUG ]]; then
    echo "UN";
  fi
  exit 0;
else
  if [[ $DEBUG ]]; then
    echo "Not Up";
  fi
  exit 1;
fi
```
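The probe greps `nodetool status` for this pod's IP and exits 0 only when the node reports `UN` (Up/Normal). The same check can be run by hand against a live pod (a sketch; the pod name is illustrative):

```sh
# Should print a line beginning with "UN" once the node has joined the ring.
kubectl exec cassandra-0 -- sh -c 'nodetool status | grep "$POD_IP"'
```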
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/run.sh (new file, 125 lines, generated/vendored)

```bash
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e
CONF_DIR=/etc/cassandra
CFG=$CONF_DIR/cassandra.yaml

# we are doing StatefulSet or just setting our seeds
if [ -z "$CASSANDRA_SEEDS" ]; then
  HOSTNAME=$(hostname -f)
fi

# The following vars relate to their counterparts in $CFG,
# for instance rpc_address
CASSANDRA_RPC_ADDRESS="${CASSANDRA_RPC_ADDRESS:-0.0.0.0}"
CASSANDRA_NUM_TOKENS="${CASSANDRA_NUM_TOKENS:-32}"
CASSANDRA_CLUSTER_NAME="${CASSANDRA_CLUSTER_NAME:='Test Cluster'}"
CASSANDRA_LISTEN_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_RPC_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_DISK_OPTIMIZATION_STRATEGY="${CASSANDRA_DISK_OPTIMIZATION_STRATEGY:-ssd}"
CASSANDRA_MIGRATION_WAIT="${CASSANDRA_MIGRATION_WAIT:-1}"
CASSANDRA_ENDPOINT_SNITCH="${CASSANDRA_ENDPOINT_SNITCH:-SimpleSnitch}"
CASSANDRA_DC="${CASSANDRA_DC}"
CASSANDRA_RACK="${CASSANDRA_RACK}"
CASSANDRA_RING_DELAY="${CASSANDRA_RING_DELAY:-30000}"
CASSANDRA_AUTO_BOOTSTRAP="${CASSANDRA_AUTO_BOOTSTRAP:-true}"
# default to "false" when no seeds are passed in
CASSANDRA_SEEDS="${CASSANDRA_SEEDS:-false}"
CASSANDRA_SEED_PROVIDER="${CASSANDRA_SEED_PROVIDER:-org.apache.cassandra.locator.SimpleSeedProvider}"

# Turn off JMX auth
CASSANDRA_OPEN_JMX="${CASSANDRA_OPEN_JMX:-false}"
# send GC to STDOUT
CASSANDRA_GC_STDOUT="${CASSANDRA_GC_STDOUT:-false}"

# if DC and RACK are set, use GossipingPropertyFileSnitch
if [[ $CASSANDRA_DC && $CASSANDRA_RACK ]]; then
  echo "dc=$CASSANDRA_DC" > $CONF_DIR/cassandra-rackdc.properties
  echo "rack=$CASSANDRA_RACK" >> $CONF_DIR/cassandra-rackdc.properties
  CASSANDRA_ENDPOINT_SNITCH="GossipingPropertyFileSnitch"
fi

# TODO what else needs to be modified
for yaml in \
  broadcast_address \
  broadcast_rpc_address \
  cluster_name \
  listen_address \
  num_tokens \
  rpc_address \
  disk_optimization_strategy \
  endpoint_snitch \
; do
  var="CASSANDRA_${yaml^^}"
  val="${!var}"
  if [ "$val" ]; then
    sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CFG"
  fi
done

echo "auto_bootstrap: ${CASSANDRA_AUTO_BOOTSTRAP}" >> $CFG

# set the seed to itself. This is only for the first pod, otherwise
# it will be able to get seeds from the seed provider
if [[ $CASSANDRA_SEEDS == 'false' ]]; then
  sed -ri 's/- seeds:.*/- seeds: "'"$POD_IP"'"/' $CFG
else # if we have seeds set them. Probably StatefulSet
  sed -ri 's/- seeds:.*/- seeds: "'"$CASSANDRA_SEEDS"'"/' $CFG
fi

sed -ri 's/- class_name: SEED_PROVIDER/- class_name: '"$CASSANDRA_SEED_PROVIDER"'/' $CFG

# send gc to stdout
if [[ $CASSANDRA_GC_STDOUT == 'true' ]]; then
  sed -ri 's/ -Xloggc:\/var\/log\/cassandra\/gc\.log//' $CONF_DIR/cassandra-env.sh
fi

# enable RMI and JMX to work on one port
echo "JVM_OPTS=\"\$JVM_OPTS -Djava.rmi.server.hostname=$POD_IP\"" >> $CONF_DIR/cassandra-env.sh

# getting WARNING messages with Migration Service
echo "-Dcassandra.migration_task_wait_in_seconds=${CASSANDRA_MIGRATION_WAIT}" >> $CONF_DIR/jvm.options
echo "-Dcassandra.ring_delay_ms=${CASSANDRA_RING_DELAY}" >> $CONF_DIR/jvm.options

if [[ $CASSANDRA_OPEN_JMX == 'true' ]]; then
  export LOCAL_JMX=no
  sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.authenticate=true/ -Dcom\.sun\.management\.jmxremote\.authenticate=false/' $CONF_DIR/cassandra-env.sh
  sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.password\.file=\/etc\/cassandra\/jmxremote\.password//' $CONF_DIR/cassandra-env.sh
fi

echo Starting Cassandra on ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_RPC_ADDRESS ${CASSANDRA_RPC_ADDRESS}
echo CASSANDRA_NUM_TOKENS ${CASSANDRA_NUM_TOKENS}
echo CASSANDRA_CLUSTER_NAME ${CASSANDRA_CLUSTER_NAME}
echo CASSANDRA_LISTEN_ADDRESS ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_BROADCAST_ADDRESS ${CASSANDRA_BROADCAST_ADDRESS}
echo CASSANDRA_BROADCAST_RPC_ADDRESS ${CASSANDRA_BROADCAST_RPC_ADDRESS}
echo CASSANDRA_DISK_OPTIMIZATION_STRATEGY ${CASSANDRA_DISK_OPTIMIZATION_STRATEGY}
echo CASSANDRA_MIGRATION_WAIT ${CASSANDRA_MIGRATION_WAIT}
echo CASSANDRA_ENDPOINT_SNITCH ${CASSANDRA_ENDPOINT_SNITCH}
echo CASSANDRA_DC ${CASSANDRA_DC}
echo CASSANDRA_RACK ${CASSANDRA_RACK}
echo CASSANDRA_RING_DELAY ${CASSANDRA_RING_DELAY}
echo CASSANDRA_AUTO_BOOTSTRAP ${CASSANDRA_AUTO_BOOTSTRAP}
echo CASSANDRA_SEEDS ${CASSANDRA_SEEDS}
echo CASSANDRA_SEED_PROVIDER ${CASSANDRA_SEED_PROVIDER}

export CLASSPATH=/kubernetes-cassandra.jar
cassandra -R -f
```
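Because every templated setting is read from the environment, behavior can be changed per deployment without rebuilding the image; for example, setting a DC and rack switches the snitch to `GossipingPropertyFileSnitch`, per the script above. A standalone sketch using the image name from the manifests:

```sh
docker run -e CASSANDRA_DC=DC1 -e CASSANDRA_RACK=Rack1 \
  -e CASSANDRA_CLUSTER_NAME=MyCluster \
  gcr.io/google-samples/cassandra:v11
```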
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/.gitignore (new file, 1 line, generated/vendored)

```
target
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/README.md (new file, 34 lines, generated/vendored)

# Cassandra on Kubernetes Custom Seed Provider: releases.k8s.io/HEAD

Within any deployment of Cassandra, a Seed Provider is used for node discovery and communication. When a Cassandra node first starts, it must discover which nodes, or seeds, hold the information about the Cassandra nodes in the ring / rack / datacenter.

This Java project provides a custom Seed Provider which communicates with the Kubernetes API to discover the required information. This provider is bundled with the Docker image provided in this example.

# Configuring the Seed Provider

The following environment variables may be used to override the default configurations:

| ENV VAR | DEFAULT VALUE | NOTES |
| ------------- |:-------------: |:-------------:|
| KUBERNETES_PORT_443_TCP_ADDR | kubernetes.default.svc.cluster.local | The hostname of the API server |
| KUBERNETES_PORT_443_TCP_PORT | 443 | API port number |
| CASSANDRA_SERVICE | cassandra | Default service name for lookup |
| POD_NAMESPACE | default | Default pod service namespace |
| K8S_ACCOUNT_TOKEN | /var/run/secrets/kubernetes.io/serviceaccount/token | Default path to service token |

# Using

If no endpoints are discovered from the API, the seeds configured in the cassandra.yaml file are used.

# Provider limitations

This Cassandra Provider implements `SeedProvider` and utilizes `SimpleSnitch`. This limits a Cassandra Ring to a single Cassandra Datacenter and ignores Rack setup. Datastax provides more documentation on the use of [_SNITCHES_](https://docs.datastax.com/en/cassandra/3.x/cassandra/architecture/archSnitchesAbout.html). Further development is planned to expand this capability.

This in effect makes every node a seed, which is not a recommended best practice. This increases maintenance and reduces gossip performance.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/pom.xml (new file, 94 lines, generated/vendored)

```xml
<!--
Copyright (C) 2015 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>io.k8s.cassandra</groupId>
  <artifactId>kubernetes-cassandra</artifactId>
  <version>1.0.2</version>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.5.1</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <properties>
    <logback.version>1.1.3</logback.version>
    <cassandra.version>3.7</cassandra.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.hamcrest</groupId>
      <artifactId>hamcrest-all</artifactId>
      <version>1.3</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.5</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-classic</artifactId>
      <version>${logback.version}</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-core</artifactId>
      <version>${logback.version}</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.codehaus.jackson</groupId>
      <artifactId>jackson-core-asl</artifactId>
      <version>1.6.3</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.codehaus.jackson</groupId>
      <artifactId>jackson-mapper-asl</artifactId>
      <version>1.6.3</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.cassandra</groupId>
      <artifactId>cassandra-all</artifactId>
      <version>${cassandra.version}</version>
      <scope>provided</scope>
    </dependency>

  </dependencies>
</project>
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java (new file, 254 lines, generated/vendored)

```java
/*
 * Copyright (C) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package io.k8s.cassandra;

import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.ConfigurationLoader;
import org.apache.cassandra.config.YamlConfigurationLoader;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.locator.SeedProvider;
import org.apache.cassandra.locator.SimpleSeedProvider;
import org.apache.cassandra.utils.FBUtilities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.*;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Self discovery {@link SeedProvider} that creates a list of Cassandra Seeds by
 * communicating with the Kubernetes API.
 * <p>Various system variables can be used to configure this provider:
 * <ul>
 *     <li>KUBERNETES_PORT_443_TCP_ADDR defaults to kubernetes.default.svc.cluster.local</li>
 *     <li>KUBERNETES_PORT_443_TCP_PORT defaults to 443</li>
 *     <li>CASSANDRA_SERVICE defaults to cassandra</li>
 *     <li>POD_NAMESPACE defaults to 'default'</li>
 *     <li>CASSANDRA_SERVICE_NUM_SEEDS defaults to 8 seeds</li>
 *     <li>K8S_ACCOUNT_TOKEN defaults to the path for the default token</li>
 * </ul>
 */
public class KubernetesSeedProvider implements SeedProvider {

    private static final Logger logger = LoggerFactory.getLogger(KubernetesSeedProvider.class);

    /**
     * default seeds to fall back on
     */
    private List<InetAddress> defaultSeeds;

    private TrustManager[] trustAll;

    private HostnameVerifier trustAllHosts;

    /**
     * Create new Seeds
     * @param params
     */
    public KubernetesSeedProvider(Map<String, String> params) {

        // Create default seeds
        defaultSeeds = createDefaultSeeds();

        // TODO: Load the CA cert when it is available on all platforms.
        trustAll = new TrustManager[] {
            new X509TrustManager() {
                public void checkServerTrusted(X509Certificate[] certs, String authType) {}
                public void checkClientTrusted(X509Certificate[] certs, String authType) {}
                public X509Certificate[] getAcceptedIssuers() { return null; }
            }
        };

        trustAllHosts = new HostnameVerifier() {
            public boolean verify(String hostname, SSLSession session) {
                return true;
            }
        };
    }

    /**
     * Call kubernetes API to collect a list of seed providers
     * @return list of seed providers
     */
    public List<InetAddress> getSeeds() {

        String host = getEnvOrDefault("KUBERNETES_PORT_443_TCP_ADDR", "kubernetes.default.svc.cluster.local");
        String port = getEnvOrDefault("KUBERNETES_PORT_443_TCP_PORT", "443");
        String serviceName = getEnvOrDefault("CASSANDRA_SERVICE", "cassandra");
        String podNamespace = getEnvOrDefault("POD_NAMESPACE", "default");
        String path = String.format("/api/v1/namespaces/%s/endpoints/", podNamespace);
        String seedSizeVar = getEnvOrDefault("CASSANDRA_SERVICE_NUM_SEEDS", "8");
        Integer seedSize = Integer.valueOf(seedSizeVar);
        String accountToken = getEnvOrDefault("K8S_ACCOUNT_TOKEN", "/var/run/secrets/kubernetes.io/serviceaccount/token");

        List<InetAddress> seeds = new ArrayList<InetAddress>();
        try {
            String token = getServiceAccountToken(accountToken);

            SSLContext ctx = SSLContext.getInstance("SSL");
            ctx.init(null, trustAll, new SecureRandom());

            String PROTO = "https://";
            URL url = new URL(PROTO + host + ":" + port + path + serviceName);
            logger.info("Getting endpoints from " + url);
            HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();

            // TODO: Remove this once the CA cert is propagated everywhere, and replace
            // with loading the CA cert.
            conn.setHostnameVerifier(trustAllHosts);

            conn.setSSLSocketFactory(ctx.getSocketFactory());
            conn.addRequestProperty("Authorization", "Bearer " + token);
            ObjectMapper mapper = new ObjectMapper();
            Endpoints endpoints = mapper.readValue(conn.getInputStream(), Endpoints.class);

            if (endpoints != null) {
                // Here is a problem point, endpoints.subsets can be null in first node cases.
                if (endpoints.subsets != null && !endpoints.subsets.isEmpty()) {
                    for (Subset subset : endpoints.subsets) {
                        if (subset.addresses != null && !subset.addresses.isEmpty()) {
                            for (Address address : subset.addresses) {
                                seeds.add(InetAddress.getByName(address.ip));

                                if (seeds.size() >= seedSize) {
                                    logger.info("Available num endpoints: " + seeds.size());
                                    return Collections.unmodifiableList(seeds);
                                }
                            }
                        }
                    }
                }
                logger.info("Available num endpoints: " + seeds.size());
            } else {
                logger.warn("Endpoints are not available using default seeds in cassandra.yaml");
                return Collections.unmodifiableList(defaultSeeds);
            }
        } catch (Exception ex) {
            logger.warn("Request to kubernetes apiserver failed, using default seeds in cassandra.yaml", ex);
            return Collections.unmodifiableList(defaultSeeds);
        }

        if (seeds.size() == 0) {
            // If we got nothing, we might be the first instance, in that case
            // fall back on the seeds that were passed in cassandra.yaml.
            logger.warn("Seeds are not available using default seeds in cassandra.yaml");
            return Collections.unmodifiableList(defaultSeeds);
        }

        return Collections.unmodifiableList(seeds);
    }

    /**
     * Code taken from {@link SimpleSeedProvider}. This is used as a fallback
     * in case we don't find seeds
     * @return
     */
    protected List<InetAddress> createDefaultSeeds()
    {
        Config conf;
        try {
            conf = loadConfig();
        }
        catch (Exception e) {
            throw new AssertionError(e);
        }
        String[] hosts = conf.seed_provider.parameters.get("seeds").split(",", -1);
        List<InetAddress> seeds = new ArrayList<InetAddress>();
        for (String host : hosts) {
            try {
                seeds.add(InetAddress.getByName(host.trim()));
            }
            catch (UnknownHostException ex) {
                // not fatal... DD will bark if there end up being zero seeds.
                logger.warn("Seed provider couldn't lookup host {}", host);
            }
        }

        if (seeds.size() == 0) {
            try {
                seeds.add(InetAddress.getLocalHost());
            } catch (UnknownHostException e) {
                logger.warn("Seed provider couldn't lookup localhost");
            }
        }
        return Collections.unmodifiableList(seeds);
    }

    /**
     * Code taken from {@link SimpleSeedProvider}
     * @return
     */
    protected static Config loadConfig() throws ConfigurationException
    {
        String loaderClass = System.getProperty("cassandra.config.loader");
        ConfigurationLoader loader = loaderClass == null
            ? new YamlConfigurationLoader()
            : FBUtilities.<ConfigurationLoader>construct(loaderClass, "configuration loading");
        return loader.loadConfig();
    }

    private static String getEnvOrDefault(String var, String def) {
        String val = System.getenv(var);
        if (val == null) {
            val = def;
        }
        return val;
    }

    private static String getServiceAccountToken(String file) {
        try {
            return new String(Files.readAllBytes(Paths.get(file)));
        } catch (IOException e) {
            logger.warn("unable to load service account token " + file);
            throw new RuntimeException("Unable to load services account token " + file);
        }
    }

    protected List<InetAddress> getDefaultSeeds() {
        return defaultSeeds;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Address {
        public String ip;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Subset {
        public List<Address> addresses;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Endpoints {
        public List<Subset> subsets;
    }
}
```
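The HTTP request `getSeeds()` builds from its defaults can be reproduced from inside any pod with a service account token mounted, which is handy when debugging discovery (a sketch mirroring the code's default host, port, and endpoints path):

```sh
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
# -k mirrors the provider's trust-all TLS handling noted in its TODOs.
curl -sk -H "Authorization: Bearer $TOKEN" \
  "https://kubernetes.default.svc.cluster.local:443/api/v1/namespaces/default/endpoints/cassandra"
```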
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java (new file, 64 lines, generated/vendored)

```java
/*
 * Copyright (C) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package io.k8s.cassandra;

import com.google.common.collect.ImmutableMap;
import org.apache.cassandra.locator.SeedProvider;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.hamcrest.Matchers.*;

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import static org.junit.Assert.*;

public class KubernetesSeedProviderTest {

    private static final Logger logger = LoggerFactory.getLogger(KubernetesSeedProviderTest.class);

    @Test
    @Ignore("has to be run inside of a kube cluster")
    public void getSeeds() throws Exception {
        SeedProvider provider = new KubernetesSeedProvider(new HashMap<String, String>());
        List<InetAddress> seeds = provider.getSeeds();

        assertThat(seeds, is(not(empty())));
    }

    @Test
    public void testDefaultSeeds() throws Exception {

        KubernetesSeedProvider provider = new KubernetesSeedProvider(new HashMap<String,String>());
        List<InetAddress> seeds = provider.getDefaultSeeds();
        List<InetAddress> seedsTest = new ArrayList<>();
        seedsTest.add(InetAddress.getByName("8.4.4.4"));
        seedsTest.add(InetAddress.getByName("8.8.8.8"));
        assertThat(seeds, is(not(empty())));
        assertThat(seeds, is(seedsTest));
        logger.debug("seeds loaded {}", seeds);
    }

}
```
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml (new file, 57 lines, generated/vendored)

```yaml
# Copyright (C) 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Warning!
# Consider the effects on 'o.a.c.i.s.LegacySSTableTest' before changing schemas in this file.
#
cluster_name: Test Cluster
# memtable_allocation_type: heap_buffers
memtable_allocation_type: offheap_objects
commitlog_sync: batch
commitlog_sync_batch_window_in_ms: 1.0
commitlog_segment_size_in_mb: 5
commitlog_directory: target/cassandra/commitlog
hints_directory: target/cassandra/hints
partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
listen_address: 127.0.0.1
storage_port: 7010
rpc_port: 9170
start_native_transport: true
native_transport_port: 9042
column_index_size_in_kb: 4
saved_caches_directory: target/cassandra/saved_caches
data_file_directories:
  - target/cassandra/data
disk_access_mode: mmap
seed_provider:
  - class_name: io.k8s.cassandra.KubernetesSeedProvider
    parameters:
      - seeds: "8.4.4.4,8.8.8.8"
endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
dynamic_snitch: true
request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler
request_scheduler_id: keyspace
server_encryption_options:
  internode_encryption: none
  keystore: conf/.keystore
  keystore_password: cassandra
  truststore: conf/.truststore
  truststore_password: cassandra
incremental_backups: true
concurrent_compactors: 4
compaction_throughput_mb_per_sec: 0
row_cache_class_name: org.apache.cassandra.cache.OHCProvider
row_cache_size_in_mb: 16
enable_user_defined_functions: true
enable_scripted_user_defined_functions: true
```
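This fixture pins the seed list to `8.4.4.4,8.8.8.8`, which is exactly what `testDefaultSeeds` asserts against; the `getSeeds` test stays `@Ignore`d outside a cluster. Running the suite is plain Maven (a sketch):

```sh
cd java
# Surefire picks up src/test/resources/cassandra.yaml from the test classpath.
mvn test
```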
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/logback-test.xml (new file, 34 lines, generated/vendored)

```xml
<!--
Copyright (C) 2015 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->

<configuration debug="false" scan="true">

  <appender name="STDOUT" target="System.out" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
    </encoder>
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>DEBUG</level>
    </filter>
  </appender>

  <logger name="io.k8s.cassandra" level="DEBUG"/>

  <root level="INFO">
    <appender-ref ref="STDOUT" />
  </root>

</configuration>
```
vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md (new file, 252 lines, generated/vendored)

## Cloud Native Deployments of Hazelcast using Kubernetes

The following document describes the development of a _cloud native_ [Hazelcast](http://hazelcast.org/) deployment on Kubernetes. When we say _cloud native_, we mean an application which understands that it is running within a cluster manager, and uses this cluster management infrastructure to help implement the application. In particular, in this instance, a custom Hazelcast `bootstrapper` is used to enable Hazelcast to dynamically discover Hazelcast nodes that have already joined the cluster.

Any topology changes are communicated and handled by Hazelcast nodes themselves.

This document also attempts to describe the core components of Kubernetes: _Pods_, _Services_, and _Replication Controllers_.

### Prerequisites

This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform.

### A note for the impatient

This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end.

### Sources

Source is freely available at:
* Hazelcast Discovery - https://github.com/pires/hazelcast-kubernetes-bootstrapper
* Dockerfile - https://github.com/pires/hazelcast-kubernetes
* Docker Trusted Build - https://quay.io/repository/pires/hazelcast-kubernetes

### Simple Single Pod Hazelcast Node

In Kubernetes, the atomic unit of an application is a [_Pod_](../../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.

In this case, we will not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition.

### Adding a Hazelcast Service

In Kubernetes a _[Service](../../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods.

Here is the service description:

<!-- BEGIN MUNGE: EXAMPLE hazelcast-service.yaml -->

```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    name: hazelcast
  name: hazelcast
spec:
  ports:
  - port: 5701
  selector:
    name: hazelcast
```

[Download example](hazelcast-service.yaml?raw=true)
<!-- END MUNGE: EXAMPLE hazelcast-service.yaml -->

The important thing to note here is the `selector`. It is a query over labels that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.

Create this service as follows:

```sh
$ kubectl create -f examples/storage/hazelcast/hazelcast-service.yaml
```

### Adding replicated nodes

The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster.

In Kubernetes a _[Replication Controller](../../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.

Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Hazelcast Pod.

<!-- BEGIN MUNGE: EXAMPLE hazelcast-controller.yaml -->

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: hazelcast
  name: hazelcast
spec:
  replicas: 1
  selector:
    name: hazelcast
  template:
    metadata:
      labels:
        name: hazelcast
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.1
        image: quay.io/pires/hazelcast-kubernetes:0.6.1
        name: hazelcast
        env:
        - name: "DNS_DOMAIN"
          value: "cluster.local"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 5701
          name: hazelcast
```

[Download example](hazelcast-controller.yaml?raw=true)
<!-- END MUNGE: EXAMPLE hazelcast-controller.yaml -->

There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.6.1`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps a Hazelcast instance accordingly. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).

You may also note that we tell Kubernetes that the container exposes the `hazelcast` port. Finally, we tell the cluster manager that we need 0.1 of a CPU core.

The bulk of the replication controller config is actually identical to the Hazelcast pod declaration above; it simply gives the controller a recipe to use when creating new pods. The other parts are the `selector` which contains the controller's selector query, and the `replicas` parameter which specifies the desired number of replicas, in this case 1.

Last but not least, we set the `DNS_DOMAIN` environment variable according to your Kubernetes cluster's DNS configuration.

Create this controller:

```sh
$ kubectl create -f examples/storage/hazelcast/hazelcast-controller.yaml
```

After the controller successfully provisions the pod, you can query the service endpoints:

```sh
$ kubectl get endpoints hazelcast -o json
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "hazelcast",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/endpoints/hazelcast",
    "uid": "094e507a-2700-11e5-abbc-080027eae546",
    "resourceVersion": "4094",
    "creationTimestamp": "2015-07-10T12:34:41Z",
    "labels": {
      "name": "hazelcast"
    }
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "10.244.37.3",
          "targetRef": {
            "kind": "Pod",
            "namespace": "default",
            "name": "hazelcast-nsyzn",
            "uid": "f57eb6b0-2706-11e5-abbc-080027eae546",
            "resourceVersion": "4093"
          }
        }
      ],
      "ports": [
        {
          "port": 5701,
          "protocol": "TCP"
        }
      ]
    }
  ]
}
```

You can see that the _Service_ has found the pod created by the replication controller.

Now it gets even more interesting.

Let's scale our cluster to 2 pods:

```sh
$ kubectl scale rc hazelcast --replicas=2
```

Now if you list the pods in your cluster, you should see two hazelcast pods:

```sh
$ kubectl get pods
NAME              READY     STATUS    RESTARTS   AGE
hazelcast-nanfb   1/1       Running   0          40s
hazelcast-nsyzn   1/1       Running   0          2m
kube-dns-xudrp    3/3       Running   0          1h
```

To prove that this all works, you can use the `log` command to examine the logs of one pod, for example:

```sh
$ kubectl log hazelcast-nanfb hazelcast
2015-07-10 13:26:34.443  INFO 5 --- [           main] com.github.pires.hazelcast.Application   : Starting Application on hazelcast-nanfb with PID 5 (/bootstrapper.jar started by root in /)
2015-07-10 13:26:34.535  INFO 5 --- [           main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@42cfcf1: startup date [Fri Jul 10 13:26:34 GMT 2015]; root of context hierarchy
2015-07-10 13:26:35.888  INFO 5 --- [           main] o.s.j.e.a.AnnotationMBeanExporter        : Registering beans for JMX exposure on startup
2015-07-10 13:26:35.924  INFO 5 --- [           main] c.g.p.h.HazelcastDiscoveryController     : Asking k8s registry at https://kubernetes.default.svc.cluster.local..
2015-07-10 13:26:37.259  INFO 5 --- [           main] c.g.p.h.HazelcastDiscoveryController     : Found 2 pods running Hazelcast.
2015-07-10 13:26:37.404  INFO 5 --- [           main] c.h.instance.DefaultAddressPicker        : [LOCAL] [someGroup] [3.5] Interfaces is disabled, trying to pick one address from TCP-IP config addresses: [10.244.77.3, 10.244.37.3]
2015-07-10 13:26:37.405  INFO 5 --- [           main] c.h.instance.DefaultAddressPicker        : [LOCAL] [someGroup] [3.5] Prefer IPv4 stack is true.
2015-07-10 13:26:37.415  INFO 5 --- [           main] c.h.instance.DefaultAddressPicker        : [LOCAL] [someGroup] [3.5] Picked Address[10.244.77.3]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true
2015-07-10 13:26:37.852  INFO 5 --- [           main] com.hazelcast.spi.OperationService       : [10.244.77.3]:5701 [someGroup] [3.5] Backpressure is disabled
2015-07-10 13:26:37.879  INFO 5 --- [           main] c.h.s.i.o.c.ClassicOperationExecutor     : [10.244.77.3]:5701 [someGroup] [3.5] Starting with 2 generic operation threads and 2 partition operation threads.
2015-07-10 13:26:38.531  INFO 5 --- [           main] com.hazelcast.system                     : [10.244.77.3]:5701 [someGroup] [3.5] Hazelcast 3.5 (20150617 - 4270dc6) starting at Address[10.244.77.3]:5701
2015-07-10 13:26:38.532  INFO 5 --- [           main] com.hazelcast.system                     : [10.244.77.3]:5701 [someGroup] [3.5] Copyright (c) 2008-2015, Hazelcast, Inc. All Rights Reserved.
2015-07-10 13:26:38.533  INFO 5 --- [           main] com.hazelcast.instance.Node              : [10.244.77.3]:5701 [someGroup] [3.5] Creating TcpIpJoiner
2015-07-10 13:26:38.534  INFO 5 --- [           main] com.hazelcast.core.LifecycleService      : [10.244.77.3]:5701 [someGroup] [3.5] Address[10.244.77.3]:5701 is STARTING
2015-07-10 13:26:38.672  INFO 5 --- [        cached1] com.hazelcast.nio.tcp.SocketConnector    : [10.244.77.3]:5701 [someGroup] [3.5] Connecting to /10.244.37.3:5701, timeout: 0, bind-any: true
2015-07-10 13:26:38.683  INFO 5 --- [        cached1] c.h.nio.tcp.TcpIpConnectionManager       : [10.244.77.3]:5701 [someGroup] [3.5] Established socket connection between /10.244.77.3:59951
2015-07-10 13:26:45.699  INFO 5 --- [ration.thread-1] com.hazelcast.cluster.ClusterService     : [10.244.77.3]:5701 [someGroup] [3.5]

Members [2] {
	Member [10.244.37.3]:5701
	Member [10.244.77.3]:5701 this
}

2015-07-10 13:26:47.722  INFO 5 --- [           main] com.hazelcast.core.LifecycleService      : [10.244.77.3]:5701 [someGroup] [3.5] Address[10.244.77.3]:5701 is STARTED
2015-07-10 13:26:47.723  INFO 5 --- [           main] com.github.pires.hazelcast.Application   : Started Application in 13.792 seconds (JVM running for 14.542)
```

Now let's scale our cluster to 4 nodes:

```sh
$ kubectl scale rc hazelcast --replicas=4
```

Examine the status again by checking the logs and you should see the 4 members connected.

### tl; dr;

For those of you who are impatient, here is the summary of the commands we ran in this tutorial.

```sh
# create a service to track all hazelcast nodes
kubectl create -f examples/storage/hazelcast/hazelcast-service.yaml

# create a replication controller to replicate hazelcast nodes
kubectl create -f examples/storage/hazelcast/hazelcast-controller.yaml

# scale up to 2 nodes
kubectl scale rc hazelcast --replicas=2

# scale up to 4 nodes
kubectl scale rc hazelcast --replicas=4
```

### Hazelcast Discovery Source

See [here](https://github.com/pires/hazelcast-kubernetes-bootstrapper/blob/master/src/main/java/com/github/pires/hazelcast/HazelcastDiscoveryController.java)

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
31
vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,31 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: hazelcast
  name: hazelcast
spec:
  replicas: 1
  selector:
    name: hazelcast
  template:
    metadata:
      labels:
        name: hazelcast
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.1
        image: quay.io/pires/hazelcast-kubernetes:0.6.1
        name: hazelcast
        env:
        - name: "DNS_DOMAIN"
          value: "cluster.local"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 5701
          name: hazelcast
11
vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    name: hazelcast
  name: hazelcast
spec:
  ports:
    - port: 5701
  selector:
    name: hazelcast
25
vendor/k8s.io/kubernetes/examples/storage/hazelcast/image/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM quay.io/pires/docker-jre:8u45-2

MAINTAINER Paulo Pires <pjpires@gmail.com>

EXPOSE 5701

RUN \
  curl -Lskj https://github.com/pires/hazelcast-kubernetes-bootstrapper/releases/download/0.5/hazelcast-kubernetes-bootstrapper-0.5.jar \
  -o /bootstrapper.jar

CMD java -jar /bootstrapper.jar
137
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md
generated
vendored
Normal file
@@ -0,0 +1,137 @@
## Galera Replication for MySQL on Kubernetes

This document explains a simple demonstration example of running MySQL synchronous replication using Galera, specifically, Percona XtraDB Cluster. The example is simplistic and uses a fixed number (3) of nodes, but the idea can be built upon and made more dynamic as Kubernetes matures.

### Prerequisites

This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform.

Also, this example requires the image found in the ```image``` directory. For your convenience, it is built and available on Docker's public image repository as ```capttofu/percona_xtradb_cluster_5_6```. You can also build it yourself, which merely requires updating the image reference in the pod and replication controller files.

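A build along these lines should work from the Kubernetes repository root. The registry and tag below are placeholders, not part of this example:

```sh
# Build the image from the example's image directory and tag it for your own registry
docker build -t <your-registry>/percona_xtradb_cluster_5_6:beta examples/storage/mysql-galera/image/
```
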
This example was tested on OS X with a Galera cluster running on VMware using the fine repo developed by Paulo Pires [https://github.com/pires/kubernetes-vagrant-coreos-cluster] and client programs built for OS X.

### Basic concept

The basic idea is this: three replication controllers with a single pod, corresponding services, and a single overall service to connect to all three nodes. One of the important design goals of MySQL replication and/or clustering is that you don't want a single point of failure, hence the need to distribute each node or slave across hosts or even geographical locations. Kubernetes is well-suited for facilitating this design pattern using the service and replication controller configuration files in this example.

By default, there are only three pods (hence replication controllers) for this cluster. This number can be increased using the variable NUM_NODES, specified in the replication controller configuration file. It's important to know that the number of nodes must always be odd.

When a replication controller is created, the corresponding container starts and runs an entrypoint script that installs the MySQL system tables, sets up users, and builds up a list of servers that is used with the Galera parameter ```wsrep_cluster_address```. This is a list of running nodes that Galera uses for election of a node to obtain SST (State Snapshot Transfer) from.

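For illustration, assuming all three node services already exist and ```USE_IP``` is unset, the entrypoint on pxc-node1 rewrites the bootstrap line in ```cluster.cnf``` roughly as follows (a sketch based on the entrypoint script shown later, not literal output):

```
# shipped default (bootstrap):
wsrep_cluster_address=gcomm://
# after the entrypoint runs on pxc-node1 (illustrative):
wsrep_cluster_address=gcomm://pxc-node2,pxc-node3
```
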
Note: Kubernetes best practice is to pre-create the services for each controller, and the configuration files which contain both the service and the replication controller for each node will, when created, result in both a service and a replication controller running for the given node. It's important that pxc-node1.yaml be processed first, and that no other pxc-nodeN services exist without corresponding replication controllers. The reason for this is that if there is a node in ```wsrep_cluster_address``` without a backing Galera node, there will be nothing to obtain SST from, which will cause the node to shut itself down and the container in question to exit (and another to be relaunched soon after, repeatedly).

First, create the overall cluster service that will be used to connect to the cluster:

```kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml```

Create the service and replication controller for the first node:

```kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml```

### Create services and controllers for the remaining nodes

Repeat the same previous steps for ```pxc-node2``` and ```pxc-node3```.

When complete, you should be able to connect with a MySQL client to the IP address of the service ```pxc-cluster``` to find a working cluster.

### An example of creating a cluster

Shown below is an example of using ```kubectl``` from within the ```./examples/storage/mysql-galera``` directory; the status of the launched replication controllers and services can then be confirmed:

```
$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml
services/pxc-cluster

$ kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml
services/pxc-node1
replicationcontrollers/pxc-node1

$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml
services/pxc-node2
replicationcontrollers/pxc-node2

$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml
services/pxc-node3
replicationcontrollers/pxc-node3

```

### Confirm a running cluster

Verify everything is running:

```
$ kubectl get rc,pods,services
CONTROLLER   CONTAINER(S)   IMAGE(S)                                    SELECTOR         REPLICAS
pxc-node1    pxc-node1      capttofu/percona_xtradb_cluster_5_6:beta   name=pxc-node1   1
pxc-node2    pxc-node2      capttofu/percona_xtradb_cluster_5_6:beta   name=pxc-node2   1
pxc-node3    pxc-node3      capttofu/percona_xtradb_cluster_5_6:beta   name=pxc-node3   1
NAME              READY     STATUS    RESTARTS   AGE
pxc-node1-h6fqr   1/1       Running   0          41m
pxc-node2-sfqm6   1/1       Running   0          41m
pxc-node3-017b3   1/1       Running   0          40m
NAME          LABELS    SELECTOR           IP(S)            PORT(S)
pxc-cluster   <none>    unit=pxc-cluster   10.100.179.58    3306/TCP
pxc-node1     <none>    name=pxc-node1     10.100.217.202   3306/TCP
                                                            4444/TCP
                                                            4567/TCP
                                                            4568/TCP
pxc-node2     <none>    name=pxc-node2     10.100.47.212    3306/TCP
                                                            4444/TCP
                                                            4567/TCP
                                                            4568/TCP
pxc-node3     <none>    name=pxc-node3     10.100.200.14    3306/TCP
                                                            4444/TCP
                                                            4567/TCP
                                                            4568/TCP

```

The cluster should be ready for use!

### Connecting to the cluster

Using ```kubectl exec``` to run a mysql client interactively, it is possible to connect to any of the pods and, through the ```pxc-cluster``` service, verify the cluster size, which should be ```3```. In the example below, the pxc-node3 replication controller is chosen, and to find out the pod name, ```kubectl get pods``` and ```awk``` are employed:

```
$ kubectl get pods|grep pxc-node3|awk '{ print $1 }'
pxc-node3-0b5mc

$ kubectl exec pxc-node3-0b5mc -i -t -- mysql -u root -p -h pxc-cluster

Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 5
Server version: 5.6.24-72.2-56-log Percona XtraDB Cluster (GPL), Release rel72.2, Revision 43abf03, WSREP version 25.11, wsrep_25.11

Copyright (c) 2009-2015 Percona LLC and/or its affiliates
Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show status like 'wsrep_cluster_size';
+--------------------+-------+
| Variable_name      | Value |
+--------------------+-------+
| wsrep_cluster_size | 3     |
+--------------------+-------+
1 row in set (0.06 sec)

```

At this point, there is a working cluster that can now be used via the pxc-cluster service IP address!

### TODO

This setup certainly can become more fluid and dynamic. One idea is to perhaps use an etcd container to store information about node state. Originally, there was a read-only Kubernetes API available to each container, but that has since been removed. Also, Kelsey Hightower is working on moving the functionality of confd to Kubernetes. This could replace the shell duct tape that builds the cluster configuration file for the image.

56
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,56 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:trusty

# add our user and group first to make sure their IDs get assigned
# consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql

ENV PERCONA_XTRADB_VERSION 5.6
ENV MYSQL_VERSION 5.6
ENV TERM linux

RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y perl --no-install-recommends && rm -rf /var/lib/apt/lists/*

RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A

RUN echo "deb http://repo.percona.com/apt trusty main" > /etc/apt/sources.list.d/percona.list
RUN echo "deb-src http://repo.percona.com/apt trusty main" >> /etc/apt/sources.list.d/percona.list

# the "/var/lib/mysql" stuff here is because the mysql-server
# postinst doesn't have an explicit way to disable the
# mysql_install_db codepath besides having a database already
# "configured" (ie, stuff in /var/lib/mysql/mysql)
# also, we set debconf keys to make APT a little quieter
RUN { \
      echo percona-server-server-5.6 percona-server-server/data-dir select ''; \
      echo percona-server-server-5.6 percona-server-server/root_password password ''; \
    } | debconf-set-selections \
    && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y percona-xtradb-cluster-client-"${MYSQL_VERSION}" \
      percona-xtradb-cluster-common-"${MYSQL_VERSION}" percona-xtradb-cluster-server-"${MYSQL_VERSION}" \
    && rm -rf /var/lib/apt/lists/* \
    && rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql

VOLUME /var/lib/mysql

COPY my.cnf /etc/mysql/my.cnf
COPY cluster.cnf /etc/mysql/conf.d/cluster.cnf

COPY docker-entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

EXPOSE 3306 4444 4567 4568
CMD ["mysqld"]
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/cluster.cnf
generated
vendored
Normal file
@@ -0,0 +1,12 @@
[mysqld]

wsrep_provider=/usr/lib/libgalera_smm.so
wsrep_cluster_address=gcomm://
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2

wsrep_sst_method=xtrabackup-v2
wsrep_node_address=127.0.0.1
wsrep_cluster_name=galera_kubernetes
wsrep_sst_auth=sstuser:changethis
164
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh
generated
vendored
Executable file
@@ -0,0 +1,164 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# This script does the following:
#
# 1. Sets up database privileges by building an SQL script
# 2. MySQL is initially started with this script the first time
# 3. Modifies my.cnf and cluster.cnf to reflect available nodes to join
#

# if NUM_NODES not passed, default to 3
if [ -z "$NUM_NODES" ]; then
  NUM_NODES=3
fi

if [ "${1:0:1}" = '-' ]; then
  set -- mysqld "$@"
fi

# if the command passed is 'mysqld' via CMD, then begin processing.
if [ "$1" = 'mysqld' ]; then
  # read DATADIR from the MySQL config
  DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"

  # only check if system tables not created from mysql_install_db and permissions
  # set with initial SQL script before proceeding to build SQL script
  if [ ! -d "$DATADIR/mysql" ]; then
    # fail if user didn't supply a root password
    if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
      echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
      echo >&2 '  Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
      exit 1
    fi

    # mysql_install_db installs system tables
    echo 'Running mysql_install_db ...'
    mysql_install_db --datadir="$DATADIR"
    echo 'Finished mysql_install_db'

    # this script will be run once when MySQL first starts to set up
    # prior to creating system tables and will ensure proper user permissions
    tempSqlFile='/tmp/mysql-first-time.sql'
    cat > "$tempSqlFile" <<-EOSQL
      DELETE FROM mysql.user ;
      CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
      GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
EOSQL

    if [ "$MYSQL_DATABASE" ]; then
      echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" >> "$tempSqlFile"
    fi

    if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
      echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$tempSqlFile"

      if [ "$MYSQL_DATABASE" ]; then
        echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" >> "$tempSqlFile"
      fi
    fi

    # Add SST (State Snapshot Transfer) user if clustering is turned on
    if [ -n "$GALERA_CLUSTER" ]; then
      # this is the SST user (initial dump or xtrabackup user)
      WSREP_SST_USER=${WSREP_SST_USER:-"sst"}
      if [ -z "$WSREP_SST_PASSWORD" ]; then
        echo >&2 'error: Galera cluster is enabled and WSREP_SST_PASSWORD is not set'
        echo >&2 '  Did you forget to add -e WSREP_SST_PASSWORD=... ?'
        exit 1
      fi
      # add SST user privileges
      echo "CREATE USER '${WSREP_SST_USER}'@'localhost' IDENTIFIED BY '${WSREP_SST_PASSWORD}';" >> "$tempSqlFile"
      echo "GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '${WSREP_SST_USER}'@'localhost';" >> "$tempSqlFile"
    fi

    echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile"

    # Add the SQL file to mysqld's command line args
    set -- "$@" --init-file="$tempSqlFile"
  fi

  chown -R mysql:mysql "$DATADIR"
fi

# if cluster is turned on, then proceed to build cluster setting strings
# that will be interpolated into the config files
if [ -n "$GALERA_CLUSTER" ]; then
  # this is the SST user (initial dump or xtrabackup user)
  WSREP_SST_USER=${WSREP_SST_USER:-"sst"}
  if [ -z "$WSREP_SST_PASSWORD" ]; then
    echo >&2 'error: database is uninitialized and WSREP_SST_PASSWORD not set'
    echo >&2 '  Did you forget to add -e WSREP_SST_PASSWORD=xxx ?'
    exit 1
  fi

  # user/password for SST user
  sed -i -e "s|^wsrep_sst_auth=sstuser:changethis|wsrep_sst_auth=${WSREP_SST_USER}:${WSREP_SST_PASSWORD}|" /etc/mysql/conf.d/cluster.cnf

  # set node's own address
  WSREP_NODE_ADDRESS=`ip addr show | grep -E '^[ ]*inet' | grep -m1 global | awk '{ print $2 }' | sed -e 's/\/.*//'`
  if [ -n "$WSREP_NODE_ADDRESS" ]; then
    sed -i -e "s|^wsrep_node_address=.*$|wsrep_node_address=${WSREP_NODE_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf
  fi

  # if the string is not defined or it only is 'gcomm://', this means bootstrap
  if [ -z "$WSREP_CLUSTER_ADDRESS" -o "$WSREP_CLUSTER_ADDRESS" == "gcomm://" ]; then
    # if empty, set to 'gcomm://'
    # NOTE: this list does not imply membership.
    # It only means "obtain SST and join from one of these..."
    if [ -z "$WSREP_CLUSTER_ADDRESS" ]; then
      WSREP_CLUSTER_ADDRESS="gcomm://"
    fi

    # loop through number of nodes
    for NUM in `seq 1 $NUM_NODES`; do
      NODE_SERVICE_HOST="PXC_NODE${NUM}_SERVICE_HOST"

      # if set
      if [ -n "${!NODE_SERVICE_HOST}" ]; then
        # if not its own IP, then add it
        if [ $(expr "$HOSTNAME" : "pxc-node${NUM}") -eq 0 ]; then
          # if not the first bootstrap node add comma
          if [ $WSREP_CLUSTER_ADDRESS != "gcomm://" ]; then
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS},"
          fi
          # append
          # if user specifies USE_IP, use that
          if [ -n "${USE_IP}" ]; then
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS}"${!NODE_SERVICE_HOST}
          # otherwise use DNS
          else
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS}pxc-node${NUM}"
          fi
        fi
      fi
    done
  fi

  # WSREP_CLUSTER_ADDRESS is now complete and will be interpolated into the
  # cluster address string (wsrep_cluster_address) in the cluster
  # configuration file, cluster.cnf
  if [ -n "$WSREP_CLUSTER_ADDRESS" -a "$WSREP_CLUSTER_ADDRESS" != "gcomm://" ]; then
    sed -i -e "s|^wsrep_cluster_address=gcomm://|wsrep_cluster_address=${WSREP_CLUSTER_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf
  fi
fi

# random server ID needed
sed -i -e "s/^server\-id=.*$/server-id=${RANDOM}/" /etc/mysql/my.cnf

# finally, start mysql
exec "$@"
55
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/my.cnf
generated
vendored
Normal file
@@ -0,0 +1,55 @@
[client]
port=3306
socket=/var/run/mysqld/mysqld.sock

[mysqld_safe]
socket=/var/run/mysqld/mysqld.sock
nice=0

[mysqld]
user=mysql
pid-file=/var/run/mysqld/mysqld.pid
socket=/var/run/mysqld/mysqld.sock
port=3306
basedir=/usr
datadir=/var/lib/mysql
tmpdir=/tmp
lc-messages-dir=/usr/share/mysql
skip-external-locking

key_buffer=16M
max_allowed_packet=16M
thread_stack=192K
thread_cache_size=8

myisam-recover=BACKUP
#max_connections=100
query_cache_limit=1M
query_cache_size=16M
slow_query_log=1
slow_query_log_file=/var/log/mysql/mysql-slow.log
long_query_time=2
log-queries-not-using-indexes

server-id=12345
log_bin=/var/log/mysql/mysql-bin.log
expire_logs_days=4
max_binlog_size=100M

default_storage_engine=InnoDB
innodb_file_per_table
innodb_log_file_size=100M
innodb_log_buffer_size=10M
innodb_log_files_in_group=2
innodb_buffer_pool_instances=4
innodb_buffer_pool_size=100M

[mysqldump]
quick
quote-names
max_allowed_packet=16M

[isamchk]
key_buffer=16M

!includedir /etc/mysql/conf.d/
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-cluster-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-cluster
  labels:
    unit: pxc-cluster
spec:
  ports:
    - port: 3306
      name: mysql
  selector:
    unit: pxc-cluster
57
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml
generated
vendored
Normal file
@@ -0,0 +1,57 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node1
  labels:
    node: pxc-node1
spec:
  ports:
    - port: 3306
      name: mysql
    - port: 4444
      name: state-snapshot-transfer
    - port: 4567
      name: replication-traffic
    - port: 4568
      name: incremental-state-transfer
  selector:
    node: pxc-node1
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node1
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node1
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml
generated
vendored
Normal file
@@ -0,0 +1,58 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node2
  labels:
    node: pxc-node2
spec:
  ports:
    - port: 3306
      name: mysql
    - port: 4444
      name: state-snapshot-transfer
    - port: 4567
      name: replication-traffic
    - port: 4568
      name: incremental-state-transfer
  selector:
    node: pxc-node2

---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node2
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node2
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node2
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml
generated
vendored
Normal file
@@ -0,0 +1,58 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node3
  labels:
    node: pxc-node3
spec:
  ports:
    - port: 3306
      name: mysql
    - port: 4444
      name: state-snapshot-transfer
    - port: 4567
      name: replication-traffic
    - port: 4568
      name: incremental-state-transfer
  selector:
    node: pxc-node3

---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node3
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node3
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node3
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
133
vendor/k8s.io/kubernetes/examples/storage/redis/README.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
## Reliable, Scalable Redis on Kubernetes

The following document describes the deployment of a reliable, multi-node Redis on Kubernetes. It deploys a master with replicated slaves, as well as replicated redis sentinels which are used for health checking and failover.

### Prerequisites

This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform.

### A note for the impatient

This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end.

### Turning up an initial master/sentinel pod

A [_Pod_](../../../docs/user-guide/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.

We will use the shared network namespace to bootstrap our Redis cluster. In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```.

Here is the config for the initial master and sentinel pod: [redis-master.yaml](redis-master.yaml)

Create this master as follows:

```sh
kubectl create -f examples/storage/redis/redis-master.yaml
```

### Turning up a sentinel service

In Kubernetes a [_Service_](../../../docs/user-guide/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API.

In Redis, we will use a Kubernetes Service to provide discoverable endpoints for the Redis sentinels in the cluster. From the sentinels, Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur.

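For example, once the sentinel service is up, any pod in the cluster can ask a sentinel for the current master. The ```mymaster``` name and the injected service environment variables below come from this example's ```image/run.sh```:

```sh
# Ask a sentinel, via the redis-sentinel service, for the current master's address
redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} \
  SENTINEL get-master-addr-by-name mymaster
```
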
Here is the definition of the sentinel service: [redis-sentinel-service.yaml](redis-sentinel-service.yaml)

Create this service:

```sh
kubectl create -f examples/storage/redis/redis-sentinel-service.yaml
```

### Turning up replicated redis servers

So far, what we have done is pretty manual, and not very fault-tolerant. If the ```redis-master``` pod that we previously created is destroyed for some reason (e.g. a machine dying), our Redis service goes away with it.

In Kubernetes a [_Replication Controller_](../../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.

Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server. Here is the replication controller config: [redis-controller.yaml](redis-controller.yaml)

The bulk of this controller config is actually identical to the redis-master pod definition above. It forms the template or "cookie cutter" that defines what it means to be a member of this set.

Create this controller:

```sh
kubectl create -f examples/storage/redis/redis-controller.yaml
```

We'll do the same thing for the sentinel. Here is the controller config: [redis-sentinel-controller.yaml](redis-sentinel-controller.yaml)

We create it as follows:

```sh
kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml
```

### Scale our replicated pods

Initially creating those pods didn't actually do anything, since we only asked for one sentinel and one redis server, and they already existed; nothing changed. Now we will add more replicas:

```sh
kubectl scale rc redis --replicas=3
```

```sh
kubectl scale rc redis-sentinel --replicas=3
```

This will create two additional replicas of the redis server and two additional replicas of the redis sentinel.

Unlike our original redis-master pod, these pods exist independently, and they use the ```redis-sentinel-service``` that we defined above to discover and join the cluster.

### Delete our manual pod

The final step in the cluster turn-up is to delete the original redis-master pod that we created manually. While it was useful for bootstrapping discovery in the cluster, we really don't want the lifespan of our sentinel to be tied to the lifespan of one of our redis servers, and now that we have a successful, replicated redis sentinel service up and running, the binding is unnecessary.

Delete the master as follows:

```sh
kubectl delete pods redis-master
```

Now let's take a close look at what happens after this pod is deleted. There are three things that happen (you can watch them unfold with the sketch after this list):

1. The redis replication controller notices that its desired state is 3 replicas, but there are currently only 2 replicas, and so it creates a new redis server to bring the replica count back up to 3.
2. The redis-sentinel replication controller likewise notices the missing sentinel, and also creates a new sentinel.
3. The redis sentinels themselves realize that the master has disappeared from the cluster, and begin the election procedure for selecting a new master. They perform this election and selection, and choose one of the existing redis server replicas to be the new master.

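A minimal way to observe this, assuming default ```kubectl``` output:

```sh
# Watch pods get replaced after the delete (Ctrl-C to stop)
kubectl get pods --watch

# Once things settle, ask a sentinel who the new master is
redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} \
  SENTINEL get-master-addr-by-name mymaster
```
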
### Conclusion

At this point we now have a reliable, scalable Redis installation. By scaling the replication controller for redis servers, we can increase or decrease the number of read-slaves in our cluster. Likewise, if failures occur, the redis-sentinels will perform master election and select a new master.

**NOTE:** since Redis 3.2 some security measures (binding to 127.0.0.1 and `--protected-mode`) are enabled by default. Please read about this in http://antirez.com/news/96

### tl; dr

For those of you who are impatient, here is the summary of commands we ran in this tutorial:

```
# Create a bootstrap master
kubectl create -f examples/storage/redis/redis-master.yaml

# Create a service to track the sentinels
kubectl create -f examples/storage/redis/redis-sentinel-service.yaml

# Create a replication controller for redis servers
kubectl create -f examples/storage/redis/redis-controller.yaml

# Create a replication controller for redis sentinels
kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml

# Scale both replication controllers
kubectl scale rc redis --replicas=3
kubectl scale rc redis-sentinel --replicas=3

# Delete the original master pod
kubectl delete pods redis-master
```

25
vendor/k8s.io/kubernetes/examples/storage/redis/image/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.4

RUN apk add -U redis sed bash && rm -rf /var/cache/apk/*

COPY redis-master.conf /redis-master/redis.conf
COPY redis-slave.conf /redis-slave/redis.conf
COPY run.sh /run.sh

CMD [ "/run.sh" ]

ENTRYPOINT [ "bash", "-c" ]
828
vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-master.conf
generated
vendored
Normal file
File diff suppressed because it is too large
828
vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf
generated
vendored
Normal file
File diff suppressed because it is too large
85
vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh
generated
vendored
Executable file
@@ -0,0 +1,85 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function launchmaster() {
  if [[ ! -e /redis-master-data ]]; then
    echo "Redis master data doesn't exist, data won't be persistent!"
    mkdir /redis-master-data
  fi
  redis-server /redis-master/redis.conf --protected-mode no
}

function launchsentinel() {
  while true; do
    master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1)
    if [[ -n ${master} ]]; then
      master="${master//\"}"
    else
      master=$(hostname -i)
    fi

    redis-cli -h ${master} INFO
    if [[ "$?" == "0" ]]; then
      break
    fi
    echo "Connecting to master failed.  Waiting..."
    sleep 10
  done

  sentinel_conf=sentinel.conf

  echo "sentinel monitor mymaster ${master} 6379 2" > ${sentinel_conf}
  echo "sentinel down-after-milliseconds mymaster 60000" >> ${sentinel_conf}
  echo "sentinel failover-timeout mymaster 180000" >> ${sentinel_conf}
  echo "sentinel parallel-syncs mymaster 1" >> ${sentinel_conf}
  # append the bind directive to the config as well (the original echoed it
  # to stdout and never wrote it to the file)
  echo "bind 0.0.0.0" >> ${sentinel_conf}

  redis-sentinel ${sentinel_conf} --protected-mode no
}

function launchslave() {
  while true; do
    master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1)
    if [[ -n ${master} ]]; then
      master="${master//\"}"
    else
      echo "Failed to find master."
      sleep 60
      exit 1
    fi
    redis-cli -h ${master} INFO
    if [[ "$?" == "0" ]]; then
      break
    fi
    echo "Connecting to master failed.  Waiting..."
    sleep 10
  done
  sed -i "s/%master-ip%/${master}/" /redis-slave/redis.conf
  sed -i "s/%master-port%/6379/" /redis-slave/redis.conf
  redis-server /redis-slave/redis.conf --protected-mode no
}

if [[ "${MASTER}" == "true" ]]; then
  launchmaster
  exit 0
fi

if [[ "${SENTINEL}" == "true" ]]; then
  launchsentinel
  exit 0
fi

launchslave
28
vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,28 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis
spec:
  replicas: 1
  selector:
    name: redis
  template:
    metadata:
      labels:
        name: redis
    spec:
      containers:
      - name: redis
        image: gcr.io/google_containers/redis:v1
        ports:
        - containerPort: 6379
        resources:
          limits:
            cpu: "0.1"
        volumeMounts:
        - mountPath: /redis-master-data
          name: data
      volumes:
        - name: data
          emptyDir: {}
33
vendor/k8s.io/kubernetes/examples/storage/redis/redis-master.yaml
generated
vendored
Normal file
@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: redis
    redis-sentinel: "true"
    role: master
  name: redis-master
spec:
  containers:
    - name: master
      image: gcr.io/google_containers/redis:v1
      env:
        - name: MASTER
          value: "true"
      ports:
        - containerPort: 6379
      resources:
        limits:
          cpu: "0.1"
      volumeMounts:
        - mountPath: /redis-master-data
          name: data
    - name: sentinel
      image: kubernetes/redis:v1
      env:
        - name: SENTINEL
          value: "true"
      ports:
        - containerPort: 26379
  volumes:
    - name: data
      emptyDir: {}
14
vendor/k8s.io/kubernetes/examples/storage/redis/redis-proxy.yaml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: redis-proxy
    role: proxy
  name: redis-proxy
spec:
  containers:
  - name: proxy
    image: kubernetes/redis-proxy:v2
    ports:
    - containerPort: 6379
      name: api
23
vendor/k8s.io/kubernetes/examples/storage/redis/redis-sentinel-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,23 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-sentinel
spec:
  replicas: 1
  selector:
    redis-sentinel: "true"
  template:
    metadata:
      labels:
        name: redis-sentinel
        redis-sentinel: "true"
        role: sentinel
    spec:
      containers:
      - name: sentinel
        image: gcr.io/google_containers/redis:v1
        env:
          - name: SENTINEL
            value: "true"
        ports:
          - containerPort: 26379
13
vendor/k8s.io/kubernetes/examples/storage/redis/redis-sentinel-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    name: sentinel
    role: service
  name: redis-sentinel
spec:
  ports:
    - port: 26379
      targetPort: 26379
  selector:
    redis-sentinel: "true"
130
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/README.md
generated
vendored
Normal file
@@ -0,0 +1,130 @@
RethinkDB Cluster on Kubernetes
===============================

Setting up a [rethinkdb](http://rethinkdb.com/) cluster on [kubernetes](http://kubernetes.io)

**Features**

 * Automatic cluster configuration by querying info from k8s
 * Simple

Quick start
-----------

**Step 1**

RethinkDB will discover its peers using endpoints provided by a Kubernetes service, so first create a service so that the following pods can query its endpoints:

```sh
$ kubectl create -f examples/storage/rethinkdb/driver-service.yaml
```

check out:

```sh
$ kubectl get services
NAME               CLUSTER_IP     EXTERNAL_IP   PORT(S)     SELECTOR       AGE
rethinkdb-driver   10.0.27.114    <none>        28015/TCP   db=rethinkdb   10m
[...]
```

**Step 2**

Start the first server in the cluster:

```sh
$ kubectl create -f examples/storage/rethinkdb/rc.yaml
```

Actually, you can start as many servers as you want at one time; just modify `replicas` in `rc.yaml`.

check out again:

```sh
$ kubectl get pods
NAME                 READY     REASON    RESTARTS   AGE
[...]
rethinkdb-rc-r4tb0   1/1       Running   0          1m
```

**Done!**

---

Scale
-----

You can scale up your cluster using `kubectl scale`. The new pods will join the existing cluster automatically. For example:

```sh
$ kubectl scale rc rethinkdb-rc --replicas=3
scaled

$ kubectl get pods
NAME                 READY     REASON    RESTARTS   AGE
[...]
rethinkdb-rc-f32c5   1/1       Running   0          1m
rethinkdb-rc-m4d50   1/1       Running   0          1m
rethinkdb-rc-r4tb0   1/1       Running   0          3m
```

Admin
-----

You need a separate pod (labeled as role:admin) to access the Web Admin UI:

```sh
kubectl create -f examples/storage/rethinkdb/admin-pod.yaml
kubectl create -f examples/storage/rethinkdb/admin-service.yaml
```

find the service:

```console
$ kubectl get services
NAME               CLUSTER_IP     EXTERNAL_IP      PORT(S)     SELECTOR                  AGE
[...]
rethinkdb-admin    10.0.131.19    104.197.19.120   8080/TCP    db=rethinkdb,role=admin   10m
rethinkdb-driver   10.0.27.114    <none>           28015/TCP   db=rethinkdb              20m
```

We request an external load balancer in the [admin-service.yaml](admin-service.yaml) file:

```
type: LoadBalancer
```

The external load balancer allows us to access the service from outside the firewall via an external IP, 104.197.19.120 in this case.

Note that you may need to create a firewall rule to allow the traffic, assuming you are using Google Compute Engine:

```console
$ gcloud compute firewall-rules create rethinkdb --allow=tcp:8080
```

Now you can open a web browser and access *http://104.197.19.120:8080* to manage your cluster.

**Why not just using pods in replicas?**

This is because kube-proxy acts as a load balancer and will send your traffic to a different server each time. Since the Web Admin UI is not stateless, using it through the proxy would cause `Connection not open on server` errors.

- - -

**BTW**

 * `gen_pod.sh` is used to generate pod templates for my local cluster.
The generated pods use `nodeSelector` to force k8s to schedule containers onto my designated nodes, since I need to access persistent data in my host dirs. Note that one needs to label the node before `nodeSelector` can work; see this [tutorial](../../../docs/user-guide/node-selection/) and the sketch after this list.

 * see [antmanler/rethinkdb-k8s](https://github.com/antmanler/rethinkdb-k8s) for detail

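A minimal usage sketch of that workflow, assuming a node named `kube-node-1` and label value `node1` (both illustrative; the generated pod also expects a `rethinkdb` namespace to exist):

```sh
# Label the node so the generated nodeSelector can match it
kubectl label nodes kube-node-1 name=node1

# Generate a pod spec pinned to that node and create it
./examples/storage/rethinkdb/gen-pod.sh admin node1 | kubectl create -f -
```
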
29
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/admin-pod.yaml
generated
vendored
Normal file
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    db: rethinkdb
    role: admin
  name: rethinkdb-admin
spec:
  containers:
  - image: gcr.io/google_containers/rethinkdb:1.16.0_1
    name: rethinkdb
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    ports:
    - containerPort: 8080
      name: admin-port
    - containerPort: 28015
      name: driver-port
    - containerPort: 29015
      name: cluster-port
    volumeMounts:
    - mountPath: /data/rethinkdb_data
      name: rethinkdb-storage
  volumes:
  - name: rethinkdb-storage
    emptyDir: {}
14
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/admin-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-admin
spec:
  ports:
    - port: 8080
      targetPort: 8080
  type: LoadBalancer
  selector:
    db: rethinkdb
    role: admin
12
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/driver-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-driver
spec:
  ports:
    - port: 28015
      targetPort: 28015
  selector:
    db: rethinkdb
73
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh
generated
vendored
Executable file
@@ -0,0 +1,73 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

: ${VERSION:=1.16.0}

readonly NAME=${1-}
if [[ -z "${NAME}" ]]; then
  echo -e "\033[1;31mName must be specified\033[0m"
  exit 1
fi

ADMIN=""
if [[ ${NAME} == "admin" ]]; then
  ADMIN="role: admin"
fi

NODE=""
# One needs to label a node with the same key/value pair,
# i.e., 'kubectl label nodes <node-name> name=${2}'
if [[ ! -z "${2-}" ]]; then
  NODE="nodeSelector: { name: ${2} }"
fi

cat << EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    ${ADMIN}
    db: rethinkdb
  name: rethinkdb-${NAME}-${VERSION}
  namespace: rethinkdb
spec:
  containers:
  - image: antmanler/rethinkdb:${VERSION}
    name: rethinkdb
    ports:
    - containerPort: 8080
      name: admin-port
      protocol: TCP
    - containerPort: 28015
      name: driver-port
      protocol: TCP
    - containerPort: 29015
      name: cluster-port
      protocol: TCP
    volumeMounts:
    - mountPath: /data/rethinkdb_data
      name: rethinkdb-storage
  ${NODE}
  restartPolicy: Always
  volumes:
  - hostPath:
      path: /data/db/rethinkdb
    name: rethinkdb-storage
EOF
28
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,28 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM rethinkdb:1.16.0

MAINTAINER BinZhao <wo@zhaob.in>

RUN apt-get update && \
    apt-get install -yq curl && \
    rm -rf /var/cache/apt/* && rm -rf /var/lib/apt/lists/* && \
    curl -L http://stedolan.github.io/jq/download/linux64/jq > /usr/bin/jq && \
    chmod u+x /usr/bin/jq

COPY ./run.sh /usr/bin/run.sh
RUN chmod u+x /usr/bin/run.sh

CMD "/usr/bin/run.sh"
44
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh
generated
vendored
Normal file
@@ -0,0 +1,44 @@
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o pipefail
|
||||
|
||||
echo Checking for other nodes
|
||||
IP=""
|
||||
if [[ -n "${KUBERNETES_SERVICE_HOST}" ]]; then
|
||||
|
||||
POD_NAMESPACE=${POD_NAMESPACE:-default}
|
||||
MYHOST=$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')
|
||||
echo My host: ${MYHOST}
|
||||
|
||||
URL="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${POD_NAMESPACE}/endpoints/rethinkdb-driver"
|
||||
echo "Endpont url: ${URL}"
|
||||
echo "Looking for IPs..."
|
||||
token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
|
||||
# try to pick up first different ip from endpoints
|
||||
IP=$(curl -s ${URL} --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization: Bearer ${token}" \
|
||||
| jq -s -r --arg h "${MYHOST}" '.[0].subsets | .[].addresses | [ .[].ip ] | map(select(. != $h)) | .[0]') || exit 1
|
||||
[[ "${IP}" == null ]] && IP=""
|
||||
fi
|
||||
|
||||
if [[ -n "${IP}" ]]; then
|
||||
ENDPOINT="${IP}:29015"
|
||||
echo "Join to ${ENDPOINT}"
|
||||
exec rethinkdb --bind all --join ${ENDPOINT}
|
||||
else
|
||||
echo "Start single instance"
|
||||
exec rethinkdb --bind all
|
||||
fi
|
||||
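
The peer-discovery trick above hinges entirely on that jq filter. To see what it does, you can feed it a hand-made endpoints object (a sketch: the subset layout mirrors what the endpoints API returns, but the IPs are invented):

```console
$ echo '{"subsets":[{"addresses":[{"ip":"10.244.1.4"},{"ip":"10.244.2.7"}]}]}' | \
    jq -s -r --arg h "10.244.1.4" \
      '.[0].subsets | .[].addresses | [ .[].ip ] | map(select(. != $h)) | .[0]'
10.244.2.7
```

When only the pod's own address is present, the filter yields `null`, which run.sh maps back to an empty `IP`, so the pod falls through to starting a single instance.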
38
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/rc.yaml
generated
vendored
Normal file
@@ -0,0 +1,38 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-rc
spec:
  replicas: 1
  selector:
    db: rethinkdb
    role: replicas
  template:
    metadata:
      labels:
        db: rethinkdb
        role: replicas
    spec:
      containers:
      - image: gcr.io/google_containers/rethinkdb:1.16.0_1
        name: rethinkdb
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8080
          name: admin-port
        - containerPort: 28015
          name: driver-port
        - containerPort: 29015
          name: cluster-port
        volumeMounts:
        - mountPath: /data/rethinkdb_data
          name: rethinkdb-storage
      volumes:
      - name: rethinkdb-storage
        emptyDir: {}
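
A quick way to exercise run.sh's seed discovery is to scale this controller up and watch the new pods join the first instance (a sketch, assuming the rc above and the `rethinkdb-driver` service it queries have already been created):

```console
$ kubectl scale rc rethinkdb-rc --replicas=3
```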
113
vendor/k8s.io/kubernetes/examples/storage/vitess/README.md
generated
vendored
Normal file
@@ -0,0 +1,113 @@
## Vitess Example

This example shows how to run a [Vitess](http://vitess.io) cluster in Kubernetes.
Vitess is a MySQL clustering system developed at YouTube that makes sharding
transparent to the application layer. It also makes scaling MySQL within
Kubernetes as simple as launching more pods.

The example brings up a database with 2 shards, and then runs a pool of
[sharded guestbook](https://github.com/youtube/vitess/tree/master/examples/kubernetes/guestbook)
pods. The guestbook app was ported from the original
[guestbook](../../../examples/guestbook-go/)
example found elsewhere in this tree, modified to use Vitess as the backend.

For a more detailed, step-by-step explanation of this example setup, see the
[Vitess on Kubernetes](http://vitess.io/getting-started/) guide.

### Prerequisites

You'll need to install [Go 1.4+](https://golang.org/doc/install) to build
`vtctlclient`, the command-line admin tool for Vitess.

We also assume you have a running Kubernetes cluster with `kubectl` pointing to
it by default. See the [Getting Started guides](../../../docs/getting-started-guides/)
for how to get to that point. Note that your Kubernetes cluster needs to have
enough resources (CPU+RAM) to schedule all the pods. By default, this example
requires a cluster-wide total of at least 6 virtual CPUs and 10GiB RAM. You can
tune these requirements in the
[resource limits](../../../docs/user-guide/compute-resources.md)
section of each YAML file.

Lastly, you need to open ports 30000-30001 (for the Vitess admin daemon) and 80 (for
the guestbook app) in your firewall. See the
[Services and Firewalls](../../../docs/user-guide/services-firewalls.md)
guide for examples of how to do that.

### Configure site-local settings

Run the `configure.sh` script to generate a `config.sh` file, which will be used
to customize your cluster settings.

``` console
./configure.sh
```

Currently, we have out-of-the-box support for storing
[backups](http://vitess.io/user-guide/backup-and-restore.html) in
[Google Cloud Storage](https://cloud.google.com/storage/).
If you're using GCS, fill in the fields requested by the configure script.
Note that your Kubernetes cluster must be running on instances with the
`storage-rw` scope for this to work. With Container Engine, you can do this by
passing `--scopes storage-rw` to the `gcloud container clusters create` command.

For other platforms, you'll need to choose the `file` backup storage plugin,
and mount a read-write network volume into the `vttablet` and `vtctld` pods.
For example, you can mount any storage service accessible through NFS into a
Kubernetes volume. Then provide the mount path to the configure script here.

If you prefer to skip setting up a backup volume for the purpose of this example,
you can choose `file` mode and set the path to `/tmp`.
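
For instance, choosing `file` mode with `/tmp` as the path leaves you with a one-line `config.sh` along these lines (the exact flags are assembled by configure.sh, shown later in this directory):

``` console
$ cat config.sh
backup_flags="-backup_storage_implementation file -file_backup_storage_root '/tmp'"
```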
### Start Vitess

``` console
./vitess-up.sh
```

This will run through the steps to bring up Vitess. At the end, you should see
something like this:

``` console
****************************
* Complete!
* Use the following line to make an alias to kvtctl:
* alias kvtctl='$GOPATH/bin/vtctlclient -server 104.197.47.173:30001'
* See the vtctld UI at: http://104.197.47.173:30000
****************************
```

### Start the Guestbook app

``` console
./guestbook-up.sh
```

The guestbook service is configured with `type: LoadBalancer` to tell Kubernetes
to expose it on an external IP. It may take a minute to set up, but you should
soon see the external IP show up under the internal one like this:

``` console
$ kubectl get service guestbook
NAME        LABELS    SELECTOR         IP(S)             PORT(S)
guestbook   <none>    name=guestbook   10.67.253.173     80/TCP
                                       104.197.151.132
```

Visit the external IP in your browser to view the guestbook. Note that in this
modified guestbook, there are multiple pages to demonstrate range-based sharding
in Vitess. Each page number is assigned to one of the shards using a
[consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing) scheme.
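
As a rough illustration of the routing rule this feeds into (this is not the guestbook's actual hash function, only the range rule applied to its output): with the default `SHARDS='-80,80-'` from env.sh, a 64-bit keyspace_id whose leading byte is below 0x80 belongs to shard `-80`, and everything else to shard `80-`:

``` shell
# Hypothetical sketch of range-based routing for SHARDS='-80,80-'.
keyspace_id=$(head -c 8 /dev/urandom | od -An -tx1 | tr -d ' \n')  # a random 64-bit id
leading_byte=$((16#${keyspace_id:0:2}))
if [ "$leading_byte" -lt $((16#80)) ]; then
  echo "keyspace_id 0x$keyspace_id -> shard '-80'"
else
  echo "keyspace_id 0x$keyspace_id -> shard '80-'"
fi
```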
### Tear down

``` console
./guestbook-down.sh
./vitess-down.sh
```

You may also want to remove any firewall rules you created.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
73
vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh
generated
vendored
Executable file
@@ -0,0 +1,73 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script generates config.sh, which is a site-local config file that is not
# checked into source control.

# Select and configure Backup Storage Implementation.
storage=gcs
read -p "Backup Storage (file, gcs) [gcs]: "
if [ -n "$REPLY" ]; then storage="$REPLY"; fi

case "$storage" in
gcs)
  # Google Cloud Storage
  project=$(gcloud config list project | grep 'project\s*=' | sed -r 's/^.*=\s*(.*)$/\1/')
  read -p "Google Developers Console Project [$project]: "
  if [ -n "$REPLY" ]; then project="$REPLY"; fi
  if [ -z "$project" ]; then
    echo "ERROR: Project name must not be empty."
    exit 1
  fi

  read -p "Google Cloud Storage bucket for Vitess backups: " bucket
  if [ -z "$bucket" ]; then
    echo "ERROR: Bucket name must not be empty."
    exit 1
  fi
  echo
  echo "NOTE: If you haven't already created this bucket, you can do so by running:"
  echo "      gsutil mb gs://$bucket"
  echo

  backup_flags=$(echo -backup_storage_implementation gcs \
    -gcs_backup_storage_project "'$project'" \
    -gcs_backup_storage_bucket "'$bucket'")
  ;;
file)
  # Mounted volume (e.g. NFS)
  read -p "Root directory for backups (usually an NFS mount): " file_root
  if [ -z "$file_root" ]; then
    echo "ERROR: Root directory must not be empty."
    exit 1
  fi
  echo
  echo "NOTE: You must add your NFS mount to the vtctld-controller-template"
  echo "      and vttablet-pod-template as described in the Kubernetes docs:"
  echo "      http://kubernetes.io/v1.0/docs/user-guide/volumes.html#nfs"
  echo

  backup_flags=$(echo -backup_storage_implementation file \
    -file_backup_storage_root "'$file_root'")
  ;;
*)
  echo "ERROR: Unsupported backup storage implementation: $storage"
  exit 1
esac

echo "Saving config.sh..."
echo "backup_flags=\"$backup_flags\"" > config.sh
8
vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql
generated
vendored
Normal file
@@ -0,0 +1,8 @@
CREATE TABLE messages (
  page BIGINT(20) UNSIGNED,
  time_created_ns BIGINT(20) UNSIGNED,
  keyspace_id BIGINT(20) UNSIGNED,
  message VARCHAR(10000),
  PRIMARY KEY (page, time_created_ns)
) ENGINE=InnoDB
63
vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh
generated
vendored
Normal file
@@ -0,0 +1,63 @@
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an include file used by the other scripts in this directory.

# Most clusters will just be accessed with 'kubectl' on $PATH.
# However, some might require a different command. For example, GKE required
# KUBECTL='gcloud beta container kubectl' for a while. Now that most of our
# use cases just need KUBECTL=kubectl, we'll make that the default.
KUBECTL=${KUBECTL:-kubectl}

# This should match the nodePort in vtctld-service.yaml
VTCTLD_PORT=${VTCTLD_PORT:-30001}

# Customizable parameters
SHARDS=${SHARDS:-'-80,80-'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-2}
RDONLY_COUNT=${RDONLY_COUNT:-0}
MAX_TASK_WAIT_RETRIES=${MAX_TASK_WAIT_RETRIES:-300}
MAX_VTTABLET_TOPO_WAIT_RETRIES=${MAX_VTTABLET_TOPO_WAIT_RETRIES:-180}
VTTABLET_TEMPLATE=${VTTABLET_TEMPLATE:-'vttablet-pod-template.yaml'}
VTGATE_TEMPLATE=${VTGATE_TEMPLATE:-'vtgate-controller-template.yaml'}
VTGATE_COUNT=${VTGATE_COUNT:-1}
CELLS=${CELLS:-'test'}
ETCD_REPLICAS=3

VTGATE_REPLICAS=$VTGATE_COUNT

# Get the ExternalIP of any node.
get_node_ip() {
  $KUBECTL get -o template -t '{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' nodes
}

# Try to find the vtctld address if not provided.
get_vtctld_addr() {
  if [ -z "$VTCTLD_ADDR" ]; then
    node_ip=$(get_node_ip)
    if [ -n "$node_ip" ]; then
      VTCTLD_ADDR="$node_ip:$VTCTLD_PORT"
    fi
  fi
  echo "$VTCTLD_ADDR"
}

config_file=`dirname "${BASH_SOURCE}"`/config.sh
if [ ! -f $config_file ]; then
  echo "Please run ./configure.sh first to generate the config.sh file."
  exit 1
fi

source $config_file
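
Because every parameter above uses the `${VAR:-default}` idiom, the cluster shape can be changed per invocation without editing env.sh. For example (hypothetical values):

``` console
$ SHARDS='-40,40-80,80-c0,c0-' TABLETS_PER_SHARD=3 RDONLY_COUNT=1 ./vitess-up.sh
```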
54
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml
generated
vendored
Normal file
@@ -0,0 +1,54 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: etcd-{{cell}}
spec:
  replicas: {{replicas}}
  template:
    metadata:
      labels:
        component: etcd
        cell: {{cell}}
        app: vitess
    spec:
      volumes:
        - name: certs
          hostPath: {path: /etc/ssl/certs}
      containers:
        - name: etcd
          image: vitess/etcd:v2.0.13-lite
          volumeMounts:
            - name: certs
              readOnly: true
              mountPath: /etc/ssl/certs
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
          command:
            - bash
            - "-c"
            - >-
              ipaddr=$(hostname -i)

              global_etcd=$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT

              cell="{{cell}}" &&
              local_etcd_host_var="ETCD_${cell^^}_SERVICE_HOST" &&
              local_etcd_port_var="ETCD_${cell^^}_SERVICE_PORT" &&
              local_etcd=${!local_etcd_host_var}:${!local_etcd_port_var}

              if [ "{{cell}}" != "global" ]; then
                until etcdctl -C "http://$global_etcd"
                  set "/vt/cells/{{cell}}" "http://$local_etcd"; do
                  echo "[$(date)] waiting for global etcd to register cell '{{cell}}'";
                  sleep 1;
                done;
              fi

              etcd -name $HOSTNAME -discovery {{discovery}}
              -advertise-client-urls http://$ipaddr:4001
              -initial-advertise-peer-urls http://$ipaddr:7001
              -listen-client-urls http://$ipaddr:4001
              -listen-peer-urls http://$ipaddr:7001
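
Once a cell's etcd pod is running, you can check that it registered itself with the global cluster by reading back the key the startup command writes (a sketch using the same etcd v2 CLI as the template; `$global_etcd` stands for the global service address, and the value shown is illustrative):

``` console
$ etcdctl -C "http://$global_etcd" get /vt/cells/test
http://10.0.0.42:4001
```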
36
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh
generated
vendored
Executable file
@@ -0,0 +1,36 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that tears down the etcd servers started by
# etcd-up.sh.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

CELLS=${CELLS:-'test'}
cells=`echo $CELLS | tr ',' ' '`

# Delete replication controllers
for cell in 'global' $cells; do
  echo "Deleting etcd replicationcontroller for $cell cell..."
  $KUBECTL delete replicationcontroller etcd-$cell

  echo "Deleting etcd service for $cell cell..."
  $KUBECTL delete service etcd-$cell
done
16
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
  name: etcd-{{cell}}
  labels:
    component: etcd
    cell: {{cell}}
    app: vitess
spec:
  ports:
    - port: 4001
  selector:
    component: etcd
    cell: {{cell}}
    app: vitess
60
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh
generated
vendored
Executable file
@@ -0,0 +1,60 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates etcd clusters.
# Vitess requires a global cluster, as well as one for each cell.
#
# For automatic discovery, an etcd cluster can be bootstrapped from an
# existing cluster. In this example, we use an externally-run discovery
# service, but you can use your own. See the etcd docs for more:
# https://github.com/coreos/etcd/blob/v2.0.13/Documentation/clustering.md

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

replicas=${ETCD_REPLICAS:-3}

CELLS=${CELLS:-'test'}
cells=`echo $CELLS | tr ',' ' '`

for cell in 'global' $cells; do
  # Generate a discovery token.
  echo "Generating discovery token for $cell cell..."
  discovery=$(curl -sL https://discovery.etcd.io/new?size=$replicas)
  if [ -z "$discovery" ]; then
    echo "Failed to get etcd discovery token for cell '$cell'."
    exit 1
  fi

  # Create the client service, which will load-balance across all replicas.
  echo "Creating etcd service for $cell cell..."
  cat etcd-service-template.yaml | \
    sed -e "s/{{cell}}/$cell/g" | \
    $KUBECTL create -f -

  # Expand template variables
  sed_script=""
  for var in cell discovery replicas; do
    sed_script+="s,{{$var}},${!var},g;"
  done

  # Create the replication controller.
  echo "Creating etcd replicationcontroller for $cell cell..."
  cat etcd-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
done
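
The `sed_script` loop above just chains one substitution per variable. For a cell named `test` with 3 replicas it would expand to something like this (discovery token elided):

``` console
s,{{cell}},test,g;s,{{discovery}},https://discovery.etcd.io/...,g;s,{{replicas}},3,g;
```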
23
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: guestbook
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: guestbook
        app: vitess
    spec:
      containers:
        - name: guestbook
          image: vitess/guestbook:v2.0.0-alpha5
          ports:
            - name: http-server
              containerPort: 8080
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
28
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-down.sh
generated
vendored
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops guestbook.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting guestbook replicationcontroller..."
$KUBECTL delete replicationcontroller guestbook

echo "Deleting guestbook service..."
$KUBECTL delete service guestbook
16
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
  name: guestbook
  labels:
    component: guestbook
    app: vitess
spec:
  ports:
    - port: 80
      targetPort: http-server
  selector:
    component: guestbook
    app: vitess
  type: LoadBalancer
28
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-up.sh
generated
vendored
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts a guestbook replicationcontroller.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Creating guestbook service..."
$KUBECTL create -f guestbook-service.yaml

echo "Creating guestbook replicationcontroller..."
$KUBECTL create -f guestbook-controller.yaml
23
vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-down.sh
generated
vendored
Executable file
@@ -0,0 +1,23 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

./vtgate-down.sh
SHARDS=$SHARDS CELLS=$CELLS TABLETS_PER_SHARD=$TABLETS_PER_SHARD ./vttablet-down.sh
./vtctld-down.sh
./etcd-down.sh
165
vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh
generated
vendored
Executable file
@@ -0,0 +1,165 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates a fully functional vitess cluster.
# It performs the following steps:
# - Create etcd clusters
# - Create vtctld pod
# - Create vttablet pods
# - Perform vtctl initialization:
#     SetKeyspaceShardingInfo, Rebuild Keyspace, Reparent Shard, Apply Schema
# - Create vtgate pods

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

cells=`echo $CELLS | tr ',' ' '`
num_cells=`echo $cells | wc -w`

function update_spinner_value () {
  spinner='-\|/'
  cur_spinner=${spinner:$(($1%${#spinner})):1}
}

function wait_for_running_tasks () {
  # This function waits for pods to be in the "Running" state.
  # 1. task_name: Name that the desired task begins with
  # 2. num_tasks: Number of tasks to wait for
  # Returns:
  #   0 if successful, 1 if timed out
  task_name=$1
  num_tasks=$2
  counter=0

  echo "Waiting for ${num_tasks}x $task_name to enter state Running"

  while [ $counter -lt $MAX_TASK_WAIT_RETRIES ]; do
    # Get the status column of pods whose names start with $task_name,
    # and count how many are in state Running.
    num_running=`$KUBECTL get pods | grep ^$task_name | grep Running | wc -l`

    echo -en "\r$task_name: $num_running out of $num_tasks in state Running..."
    if [ $num_running -eq $num_tasks ]
    then
      echo Complete
      return 0
    fi
    update_spinner_value $counter
    echo -n $cur_spinner
    let counter=counter+1
    sleep 1
  done
  echo Timed out
  return 1
}

if [ -z "$GOPATH" ]; then
  echo "ERROR: GOPATH undefined, can't obtain vtctlclient"
  exit 1
fi

export KUBECTL='kubectl'

echo "Downloading and installing vtctlclient..."
go get -u github.com/youtube/vitess/go/cmd/vtctlclient

num_shards=`echo $SHARDS | tr "," " " | wc -w`
total_tablet_count=$(($num_shards*$TABLETS_PER_SHARD*$num_cells))
vtgate_count=$VTGATE_COUNT
if [ $vtgate_count -eq 0 ]; then
  vtgate_count=$(($total_tablet_count/4>3?$total_tablet_count/4:3))
fi

echo "****************************"
echo "*Creating vitess cluster:"
echo "* Shards: $SHARDS"
echo "* Tablets per shard: $TABLETS_PER_SHARD"
echo "* Rdonly per shard: $RDONLY_COUNT"
echo "* VTGate count: $vtgate_count"
echo "* Cells: $cells"
echo "****************************"

echo 'Running etcd-up.sh' && CELLS=$CELLS ./etcd-up.sh
wait_for_running_tasks etcd-global 3
for cell in $cells; do
  wait_for_running_tasks etcd-$cell 3
done

echo 'Running vtctld-up.sh' && ./vtctld-up.sh
echo 'Running vttablet-up.sh' && CELLS=$CELLS ./vttablet-up.sh
echo 'Running vtgate-up.sh' && ./vtgate-up.sh

wait_for_running_tasks vtctld 1
wait_for_running_tasks vttablet $total_tablet_count
wait_for_running_tasks vtgate $vtgate_count

vtctld_port=30001
vtctld_ip=`kubectl get -o yaml nodes | grep 'type: ExternalIP' -B 1 | head -1 | awk '{print $NF}'`
vtctl_server="$vtctld_ip:$vtctld_port"
kvtctl="$GOPATH/bin/vtctlclient -server $vtctl_server"

echo Waiting for tablets to be visible in the topology
counter=0
while [ $counter -lt $MAX_VTTABLET_TOPO_WAIT_RETRIES ]; do
  num_tablets=0
  for cell in $cells; do
    num_tablets=$(($num_tablets+`$kvtctl ListAllTablets $cell | wc -l`))
  done
  echo -en "\r$num_tablets out of $total_tablet_count in topology..."
  if [ $num_tablets -eq $total_tablet_count ]
  then
    echo Complete
    break
  fi
  update_spinner_value $counter
  echo -n $cur_spinner
  let counter=counter+1
  sleep 1
  if [ $counter -eq $MAX_VTTABLET_TOPO_WAIT_RETRIES ]
  then
    echo Timed out
  fi
done

# split_shard_count = num_shards for a sharded keyspace, 0 for an unsharded one
split_shard_count=$num_shards
if [ $split_shard_count -eq 1 ]; then
  split_shard_count=0
fi

echo -n Setting Keyspace Sharding Info...
$kvtctl SetKeyspaceShardingInfo -force -split_shard_count $split_shard_count test_keyspace keyspace_id uint64
echo Done
echo -n Rebuilding Keyspace Graph...
$kvtctl RebuildKeyspaceGraph test_keyspace
echo Done
echo -n Reparenting...
shard_num=1
for shard in $(echo $SHARDS | tr "," " "); do
  $kvtctl InitShardMaster -force test_keyspace/$shard `echo $cells | awk '{print $1}'`-0000000${shard_num}00
  let shard_num=shard_num+1
done
echo Done
echo -n Applying Schema...
$kvtctl ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace
echo Done

echo "****************************"
echo "* Complete!"
echo "* Use the following line to make an alias to kvtctl:"
echo "* alias kvtctl='\$GOPATH/bin/vtctlclient -server $vtctl_server'"
echo "* See the vtctld UI at: http://${vtctld_ip}:30000"
echo "****************************"
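
After the script finishes, the alias from the closing banner gives you the same `vtctlclient` the script used for its own initialization steps, so any vtctl command can be run against the cluster, e.g. (server address taken from the sample banner in the README):

``` console
$ alias kvtctl='$GOPATH/bin/vtctlclient -server 104.197.47.173:30001'
$ kvtctl ListAllTablets test
```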
55
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml
generated
vendored
Normal file
@@ -0,0 +1,55 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: vtctld
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: vtctld
        app: vitess
    spec:
      containers:
        - name: vtctld
          image: vitess/lite:v2.0.0-alpha5
          volumeMounts:
            - name: syslog
              mountPath: /dev/log
            - name: vtdataroot
              mountPath: /vt/vtdataroot
            - name: certs
              readOnly: true
              mountPath: /etc/ssl/certs
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
          command:
            - sh
            - "-c"
            - >-
              mkdir -p $VTDATAROOT/tmp &&
              chown -R vitess /vt &&
              su -p -c "/vt/bin/vtctld
              -debug
              -templates $VTTOP/go/cmd/vtctld/templates
              -web_dir $VTTOP/web/vtctld
              -log_dir $VTDATAROOT/tmp
              -alsologtostderr
              -port 15000
              -grpc_port 15001
              -service_map 'grpc-vtctl'
              -topo_implementation etcd
              -tablet_protocol grpc
              -tablet_manager_protocol grpc
              -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
              {{backup_flags}}" vitess
      volumes:
        - name: syslog
          hostPath: {path: /dev/log}
        - name: vtdataroot
          emptyDir: {}
        - name: certs
          hostPath: {path: /etc/ssl/certs}
28
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-down.sh
generated
vendored
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops vtctld.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting vtctld replicationcontroller..."
$KUBECTL delete replicationcontroller vtctld

echo "Deleting vtctld service..."
$KUBECTL delete service vtctld
22
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,22 @@
kind: Service
apiVersion: v1
metadata:
  name: vtctld
  labels:
    component: vtctld
    app: vitess
spec:
  ports:
    - port: 15000
      name: web
      targetPort: 15000
      nodePort: 30000
    - port: 15001
      name: grpc
      targetPort: 15001
      nodePort: 30001
  selector:
    component: vtctld
    app: vitess
  type: NodePort
40
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh
generated
vendored
Executable file
@@ -0,0 +1,40 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts vtctld.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Creating vtctld service..."
$KUBECTL create -f vtctld-service.yaml

echo "Creating vtctld replicationcontroller..."
# Expand template variables
sed_script=""
for var in backup_flags; do
  sed_script+="s,{{$var}},${!var},g;"
done

# Instantiate the template and send it to kubectl.
cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -

server=$(get_vtctld_addr)
echo
echo "vtctld address: http://$server"
45
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml
generated
vendored
Normal file
@@ -0,0 +1,45 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: vtgate
spec:
  replicas: {{replicas}}
  template:
    metadata:
      labels:
        component: vtgate
        app: vitess
    spec:
      containers:
        - name: vtgate
          image: vitess/lite:v2.0.0-alpha5
          volumeMounts:
            - name: syslog
              mountPath: /dev/log
            - name: vtdataroot
              mountPath: /vt/vtdataroot
          resources:
            limits:
              memory: "512Mi"
              cpu: "500m"
          command:
            - sh
            - "-c"
            - >-
              mkdir -p $VTDATAROOT/tmp &&
              chown -R vitess /vt &&
              su -p -c "/vt/bin/vtgate
              -topo_implementation etcd
              -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
              -log_dir $VTDATAROOT/tmp
              -alsologtostderr
              -port 15001
              -tablet_protocol grpc
              -service_map 'bsonrpc-vt-vtgateservice'
              -cell test" vitess
      volumes:
        - name: syslog
          hostPath: {path: /dev/log}
        - name: vtdataroot
          emptyDir: {}
28
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-down.sh
generated
vendored
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops vtgate.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting vtgate replicationcontroller..."
$KUBECTL delete replicationcontroller vtgate

echo "Deleting vtgate service..."
$KUBECTL delete service vtgate
15
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: vtgate
  labels:
    component: vtgate
    app: vitess
spec:
  ports:
    - port: 15001
  selector:
    component: vtgate
    app: vitess
  type: LoadBalancer
38
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-up.sh
generated
vendored
Executable file
@@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts a vtgate replicationcontroller.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

VTGATE_REPLICAS=${VTGATE_REPLICAS:-3}
VTGATE_TEMPLATE=${VTGATE_TEMPLATE:-'vtgate-controller-template.yaml'}

replicas=$VTGATE_REPLICAS

echo "Creating vtgate service..."
$KUBECTL create -f vtgate-service.yaml

sed_script=""
for var in replicas; do
  sed_script+="s,{{$var}},${!var},g;"
done

echo "Creating vtgate replicationcontroller..."
cat $VTGATE_TEMPLATE | sed -e "$sed_script" | $KUBECTL create -f -
51
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh
generated
vendored
Executable file
@@ -0,0 +1,51 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that tears down the vttablet pods started by
# vttablet-up.sh.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

server=$(get_vtctld_addr)

# Delete the pods for all shards
CELLS=${CELLS:-'test'}
keyspace='test_keyspace'
SHARDS=${SHARDS:-'0'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-5}
UID_BASE=${UID_BASE:-100}

num_shards=`echo $SHARDS | tr "," " " | wc -w`
uid_base=$UID_BASE

for shard in `seq 1 $num_shards`; do
  cell_index=0
  for cell in `echo $CELLS | tr "," " "`; do
    for uid_index in `seq 0 $(($TABLETS_PER_SHARD-1))`; do
      uid=$[$uid_base + $uid_index + $cell_index]
      printf -v alias '%s-%010d' $cell $uid

      echo "Deleting pod for tablet $alias..."
      $KUBECTL delete pod vttablet-$uid
    done
    let cell_index=cell_index+100000000
  done
  let uid_base=uid_base+100
done
128
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml
generated
vendored
Normal file
@@ -0,0 +1,128 @@
kind: Pod
apiVersion: v1
metadata:
  name: vttablet-{{uid}}
  labels:
    component: vttablet
    keyspace: "{{keyspace}}"
    shard: "{{shard_label}}"
    tablet: "{{alias}}"
    app: vitess
spec:
  containers:
    - name: vttablet
      image: vitess/lite:v2.0.0-alpha5
      volumeMounts:
        - name: syslog
          mountPath: /dev/log
        - name: vtdataroot
          mountPath: /vt/vtdataroot
        - name: certs
          readOnly: true
          mountPath: /etc/ssl/certs
      resources:
        limits:
          memory: "1Gi"
          cpu: "500m"
      command:
        - bash
        - "-c"
        - >-
          set -e

          mysql_socket="$VTDATAROOT/{{tablet_subdir}}/mysql.sock"

          mkdir -p $VTDATAROOT/tmp

          chown -R vitess /vt

          while [ ! -e $mysql_socket ]; do
            echo "[$(date)] waiting for $mysql_socket" ;
            sleep 1 ;
          done

          su -p -s /bin/bash -c "mysql -u vt_dba -S $mysql_socket
          -e 'CREATE DATABASE IF NOT EXISTS vt_{{keyspace}}'" vitess

          su -p -s /bin/bash -c "/vt/bin/vttablet
          -topo_implementation etcd
          -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
          -log_dir $VTDATAROOT/tmp
          -alsologtostderr
          -port {{port}}
          -grpc_port {{grpc_port}}
          -service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'
          -binlog_player_protocol grpc
          -tablet-path {{alias}}
          -tablet_hostname $(hostname -i)
          -init_keyspace {{keyspace}}
          -init_shard {{shard}}
          -target_tablet_type {{tablet_type}}
          -mysqlctl_socket $VTDATAROOT/mysqlctl.sock
          -db-config-app-uname vt_app
          -db-config-app-dbname vt_{{keyspace}}
          -db-config-app-charset utf8
          -db-config-dba-uname vt_dba
          -db-config-dba-dbname vt_{{keyspace}}
          -db-config-dba-charset utf8
          -db-config-repl-uname vt_repl
          -db-config-repl-dbname vt_{{keyspace}}
          -db-config-repl-charset utf8
          -db-config-filtered-uname vt_filtered
          -db-config-filtered-dbname vt_{{keyspace}}
          -db-config-filtered-charset utf8
          -enable-rowcache
          -rowcache-bin /usr/bin/memcached
          -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock
          -health_check_interval 5s
          -restore_from_backup {{backup_flags}}" vitess
    - name: mysql
      image: vitess/lite:v2.0.0-alpha5
      volumeMounts:
        - name: syslog
          mountPath: /dev/log
        - name: vtdataroot
          mountPath: /vt/vtdataroot
      resources:
        limits:
          memory: "1Gi"
          cpu: "500m"
      command:
        - sh
        - "-c"
        - >-
          mkdir -p $VTDATAROOT/tmp &&
          chown -R vitess /vt

          su -p -c "/vt/bin/mysqlctld
          -log_dir $VTDATAROOT/tmp
          -alsologtostderr
          -tablet_uid {{uid}}
          -socket_file $VTDATAROOT/mysqlctl.sock
          -db-config-app-uname vt_app
          -db-config-app-dbname vt_{{keyspace}}
          -db-config-app-charset utf8
          -db-config-dba-uname vt_dba
          -db-config-dba-dbname vt_{{keyspace}}
          -db-config-dba-charset utf8
          -db-config-repl-uname vt_repl
          -db-config-repl-dbname vt_{{keyspace}}
          -db-config-repl-charset utf8
          -db-config-filtered-uname vt_filtered
          -db-config-filtered-dbname vt_{{keyspace}}
          -db-config-filtered-charset utf8
          -bootstrap_archive mysql-db-dir_10.0.13-MariaDB.tbz" vitess
      # The bootstrap archive above contains an empty mysql data dir
      # with user permissions set up as required by Vitess. The archive is
      # included in the Docker image.
      env:
        - name: EXTRA_MY_CNF
          value: /vt/config/mycnf/master_mariadb.cnf
  volumes:
    - name: syslog
      hostPath: {path: /dev/log}
    - name: vtdataroot
      emptyDir: {}
    - name: certs
      hostPath: {path: /etc/ssl/certs}
68
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-up.sh
generated
vendored
Executable file
@@ -0,0 +1,68 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates a vttablet deployment.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

# Create the pods for shard-0
CELLS=${CELLS:-'test'}
keyspace='test_keyspace'
SHARDS=${SHARDS:-'0'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-5}
port=15002
grpc_port=16002
UID_BASE=${UID_BASE:-100}
VTTABLET_TEMPLATE=${VTTABLET_TEMPLATE:-'vttablet-pod-template.yaml'}
RDONLY_COUNT=${RDONLY_COUNT:-2}

uid_base=$UID_BASE
for shard in $(echo $SHARDS | tr "," " "); do
  cell_index=0
  for cell in `echo $CELLS | tr ',' ' '`; do
    echo "Creating $keyspace.shard-$shard pods in cell $cell..."
    for uid_index in `seq 0 $(($TABLETS_PER_SHARD-1))`; do
      uid=$[$uid_base + $uid_index + $cell_index]
      printf -v alias '%s-%010d' $cell $uid
      printf -v tablet_subdir 'vt_%010d' $uid

      echo "Creating pod for tablet $alias..."

      # Add xx to the beginning or end if there is a dash. K8s does not allow
      # leading or trailing dashes in labels.
      shard_label=`echo $shard | sed s'/[-]$/-xx/' | sed s'/^-/xx-/'`

      tablet_type=replica
      if [ $uid_index -gt $(($TABLETS_PER_SHARD-$RDONLY_COUNT-1)) ]; then
        tablet_type=rdonly
      fi

      # Expand template variables
      sed_script=""
      for var in alias cell uid keyspace shard shard_label port grpc_port tablet_subdir tablet_type backup_flags; do
        sed_script+="s,{{$var}},${!var},g;"
      done

      # Instantiate the template and send it to kubectl.
      cat $VTTABLET_TEMPLATE | sed -e "$sed_script" | $KUBECTL create -f -
    done
    let cell_index=cell_index+100000000
  done
  let uid_base=uid_base+100
done
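
With the defaults from env.sh (`SHARDS='-80,80-'`, `TABLETS_PER_SHARD=2`, `CELLS='test'`, `UID_BASE=100`), the uid arithmetic above produces the following pods and tablet aliases, which is also why vitess-up.sh reparents to `test-0000000100` and `test-0000000200`:

``` console
shard '-80': uid 100 -> alias test-0000000100 (pod vttablet-100)
             uid 101 -> alias test-0000000101 (pod vttablet-101)
shard '80-': uid 200 -> alias test-0000000200 (pod vttablet-200)
             uid 201 -> alias test-0000000201 (pod vttablet-201)
```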