forked from barak/tarpoon

Add glide.yaml and vendor deps

This commit is contained in:
Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

62
vendor/k8s.io/kubernetes/test/kubemark/common.sh generated vendored Normal file

@@ -0,0 +1,62 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "${KUBE_ROOT}/cluster/kubemark/config-default.sh"
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
source "${KUBE_ROOT}/cluster/lib/util.sh"
# hack/lib/init.sh will overwrite ETCD_IMAGE if it is unset,
# using the default from hack/lib/etcd.sh.
# To avoid that, if it is empty, we set it to 'avoid-overwrite' and
# clear it after sourcing.
if [ -z "${ETCD_IMAGE}" ]; then
ETCD_IMAGE="avoid-overwrite"
fi
source "${KUBE_ROOT}/hack/lib/init.sh"
if [ "${ETCD_IMAGE}" == "avoid-overwrite" ]; then
ETCD_IMAGE=""
fi
detect-project &> /dev/null
export PROJECT
find-release-tars
MASTER_NAME="${INSTANCE_PREFIX}-kubemark-master"
MASTER_TAG="kubemark-master"
EVENT_STORE_NAME="${INSTANCE_PREFIX}-event-store"
RETRIES=3
export KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
export KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
export RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
# Runs gcloud compute command with the given parameters. Up to ${RETRIES}
# attempts will be made to execute the command.
# arguments:
# $@: all stuff that goes after 'gcloud compute '
function run-gcloud-compute-with-retries {
for attempt in $(seq 1 ${RETRIES}); do
if ! gcloud compute "$@"; then
echo -e "${color_yellow}Attempt ${attempt} failed to $1 $2 $3. Retrying.${color_norm}" >&2
sleep $(($attempt * 5))
else
return 0
fi
done
echo -e "${color_red}Failed to $1 $2 $3.${color_norm}" >&2
exit 1
}
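# A minimal usage sketch (mirroring the invocations in start-kubemark.sh; the
# disk name and flags are illustrative):
#   run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
#     --project "${PROJECT}" --zone "${ZONE}" \
#     --type "${MASTER_DISK_TYPE}" --size "${MASTER_DISK_SIZE}"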

22
vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh generated vendored Executable file

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
curl https://sdk.cloud.google.com 2> /dev/null | bash
sudo gcloud components update kubectl -q
sudo ln -s /usr/local/share/google/google-cloud-sdk/bin/kubectl /bin/
kubectl config set-cluster hollow-cluster --server=http://localhost:8080 --insecure-skip-tls-verify=true
kubectl config set-credentials $(whoami)
kubectl config set-context hollow-context --cluster=hollow-cluster --user=$(whoami)
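# Note: this script defines but does not activate the context; a caller could
# switch to it with 'kubectl config use-context hollow-context'.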

18
vendor/k8s.io/kubernetes/test/kubemark/get-real-pod-for-hollow-node.sh generated vendored Executable file

@@ -0,0 +1,18 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
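# Usage (inferred from the pipeline below): get-real-pod-for-hollow-node.sh <hollow-node-name>
# Prints the first 'hollow' pod name found in the kubemark namespace's pod JSON
# near a match for the given argument.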
kubectl get pod --namespace=kubemark -o json | grep -C100 "$1" | grep hollow | head -n1 | cut -d: -f2 | cut -d, -f1 | tr -d \" | tr -d '[[:space:]]'
echo ""

83
vendor/k8s.io/kubernetes/test/kubemark/resources/heapster_template.json generated vendored Normal file

@@ -0,0 +1,83 @@
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "heapster-v1.2.0",
"labels": {
"k8s-app": "heapster",
"version": "v1.2.0"
}
},
"spec": {
"replicas": 1,
"selector": {
"k8s-app": "heapster",
"version": "v1.2.0"
},
"template": {
"metadata": {
"labels": {
"k8s-app": "heapster",
"version": "v1.2.0"
}
},
"spec": {
"volumes": [
{
"name": "kubeconfig-volume",
"secret": {
"secretName": "kubeconfig"
}
}
],
"containers": [
{
"name": "heapster",
"image": "gcr.io/google_containers/heapster:v1.2.0",
"resources": {
"requests": {
"cpu": "100m",
"memory": "##METRICS_MEM##Mi"
}
},
"command": [
"/heapster"
],
"args": [
"--source=kubernetes:https://##MASTER_IP##:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/kubeconfig"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
}
]
},
{
"name": "eventer",
"image": "gcr.io/google_containers/heapster:v1.2.0",
"resources": {
"requests": {
"cpu": "100m",
"memory": "##EVENTER_MEM##Ki"
}
},
"command": [
"/eventer"
],
"args": [
"--source=kubernetes:https://104.197.233.84:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/kubeconfig"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
}
]
}]
}
}
}
}

137
vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.json generated vendored Normal file

@@ -0,0 +1,137 @@
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "hollow-node",
"labels": {
"name": "hollow-node"
}
},
"spec": {
"replicas": ##numreplicas##,
"selector": {
"name": "hollow-node"
},
"template": {
"metadata": {
"labels": {
"name": "hollow-node"
}
},
"spec": {
"volumes": [
{
"name": "kubeconfig-volume",
"secret": {
"secretName": "kubeconfig"
}
},
{
"name": "logs-volume",
"hostPath": {
"path": "/var/logs"
}
}
],
"containers": [
{
"name": "hollow-kubelet",
"image": "gcr.io/##project##/kubemark:latest",
"ports": [
{"containerPort": 4194},
{"containerPort": 10250},
{"containerPort": 10255}
],
"env": [
{
"name": "CONTENT_TYPE",
"valueFrom": {
"configMapKeyRef": {
"name": "node-configmap",
"key": "content.type"
}
}
},
{
"name": "MY_POD_NAME",
"valueFrom": {
"fieldRef": {
"fieldPath": "metadata.name"
}
}
}
],
"command": [
"/bin/sh",
"-c",
"./kubemark.sh --morph=kubelet $(CONTENT_TYPE) --v=2 1>>/var/logs/kubelet_$(MY_POD_NAME).log 2>&1"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
},
{
"name": "logs-volume",
"mountPath": "/var/logs"
}
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "100M"
}
},
"securityContext": {
"privileged": true
}
},
{
"name": "hollow-proxy",
"image": "gcr.io/##project##/kubemark:latest",
"env": [
{
"name": "CONTENT_TYPE",
"valueFrom": {
"configMapKeyRef": {
"name": "node-configmap",
"key": "content.type"
}
}
},
{
"name": "MY_POD_NAME",
"valueFrom": {
"fieldRef": {
"fieldPath": "metadata.name"
}
}
}
],
"command": [
"/bin/sh",
"-c",
"./kubemark.sh --morph=proxy $(CONTENT_TYPE) --v=2 1>>/var/logs/kube_proxy_$(MY_POD_NAME).log 2>&1"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
},
{
"name": "logs-volume",
"mountPath": "/var/logs"
}
],
"resources": {
"requests": {
"cpu": "20m",
"memory": "100M"
}
}
}]
}
}
}
}

7
vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-ns.json generated vendored Normal file

@@ -0,0 +1,7 @@
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "kubemark"
}
}

44
vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh generated vendored Executable file

@@ -0,0 +1,44 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export KUBERNETES_PROVIDER="kubemark"
export KUBE_CONFIG_FILE="config-default.sh"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# We need an absolute path to KUBE_ROOT
ABSOLUTE_ROOT=$(readlink -f ${KUBE_ROOT})
source ${KUBE_ROOT}/cluster/kubemark/util.sh
source ${KUBE_ROOT}/cluster/kubemark/config-default.sh
echo "Kubemark master name: ${MASTER_NAME}"
detect-master
export KUBE_MASTER_URL="https://${KUBE_MASTER_IP}"
export KUBECONFIG="${ABSOLUTE_ROOT}/test/kubemark/resources/kubeconfig.kubemark"
export E2E_MIN_STARTUP_PODS=0
if [[ $# -eq 0 ]]; then
ARGS='--ginkgo.focus=should\sallow\sstarting\s30\spods\sper\snode'
else
ARGS="$@"
fi
go run ./hack/e2e.go -v --check_version_skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS}"
# Just make local testing easier...
# ${KUBE_ROOT}/hack/ginkgo-e2e.sh "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" $ARGS

155
vendor/k8s.io/kubernetes/test/kubemark/start-kubemark-master.sh generated vendored Executable file

@@ -0,0 +1,155 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: figure out how to get etcd tag from some real configuration and put it here.
function write_supervisor_conf() {
local name=$1
local exec_command=$2
cat >>"/etc/supervisor/conf.d/${name}.conf" <<EOF
[program:${name}]
command=${exec_command}
stderr_logfile=/var/log/${name}.log
stdout_logfile=/var/log/${name}.log
autorestart=true
startretries=1000000
EOF
}
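# For example, the kube-scheduler call further down renders roughly this unit
# (flags elided):
#   [program:kube-scheduler]
#   command=bash -c "ulimit -n 65536; /kubernetes/server/bin/kube-scheduler --master=127.0.0.1:8080 ..."
#   stderr_logfile=/var/log/kube-scheduler.log
#   stdout_logfile=/var/log/kube-scheduler.log
#   autorestart=true
#   startretries=1000000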
EVENT_STORE_IP=$1
EVENT_STORE_URL="http://${EVENT_STORE_IP}:4002"
NUM_NODES=$2
EVENT_PD=$3
# KUBEMARK_ETCD_IMAGE may be empty, so it has to be kept as the last argument.
KUBEMARK_ETCD_IMAGE=$4
if [[ -z "${KUBEMARK_ETCD_IMAGE}" ]]; then
# Default etcd version.
KUBEMARK_ETCD_IMAGE="2.2.1"
fi
function retry() {
for i in {1..4}; do
"$@" && return 0 || sleep $i
done
"$@"
}
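# e.g. 'retry sudo docker run ...' (as used below): up to four attempts with a
# linearly growing sleep (1s..4s), then one final attempt whose exit status is
# returned.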
function mount-master-pd() {
local -r pd_name=$1
local -r mount_point=$2
if [[ ! -e "/dev/disk/by-id/${pd_name}" ]]; then
echo "Can't find ${pd_name}. Skipping mount."
return
fi
device_info=$(ls -l "/dev/disk/by-id/${pd_name}")
local relative_path=${device_info##* }
pd_device="/dev/disk/by-id/${relative_path}"
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/${pd_name}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
# Format only if the disk is not already formatted.
if ! tune2fs -l "${pd_path}" ; then
echo "Formatting '${pd_path}'"
mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard "${pd_path}"
fi
echo "Mounting '${pd_path}' at '${mount_point}'"
mount -o discard,defaults "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
}
main_etcd_mount_point="/mnt/disks/master-pd"
mount-master-pd "google-master-pd" "${main_etcd_mount_point}"
# Contains all the data stored in etcd.
mkdir -m 700 -p "${main_etcd_mount_point}/var/etcd"
ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${main_etcd_mount_point}/srv/kubernetes"
ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${main_etcd_mount_point}/srv/sshproxy"
ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy
if [ "${EVENT_PD:-false}" == "true" ]; then
event_etcd_mount_point="/mnt/disks/master-event-pd"
mount-master-pd "google-master-event-pd" "${event_etcd_mount_point}"
# Contains all the data stored in event etcd.
mkdir -m 700 -p "${event_etcd_mount_point}/var/etcd/events"
ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
fi
ETCD_QUOTA_BYTES=""
if [ "${KUBEMARK_ETCD_VERSION:0:2}" == "3." ]; then
# TODO: Set larger quota to see if that helps with
# 'mvcc: database space exceeded' errors. If so, pipe
# though our setup scripts.
ETCD_QUOTA_BYTES="--quota-backend-bytes=4294967296 "
fi
if [ "${EVENT_STORE_IP}" == "127.0.0.1" ]; then
# Retry starting etcd to avoid pulling image errors.
retry sudo docker run --net=host \
-v /var/etcd/events/data:/var/etcd/data -v /var/log:/var/log -d \
gcr.io/google_containers/etcd:${KUBEMARK_ETCD_IMAGE} /bin/sh -c "/usr/local/bin/etcd \
--listen-peer-urls http://127.0.0.1:2381 \
--advertise-client-urls=http://127.0.0.1:4002 \
--listen-client-urls=http://0.0.0.0:4002 \
--data-dir=/var/etcd/data ${ETCD_QUOTA_BYTES} 1>> /var/log/etcd-events.log 2>&1"
fi
# Retry starting etcd to avoid pulling image errors.
retry sudo docker run --net=host \
-v /var/etcd/data:/var/etcd/data -v /var/log:/var/log -d \
gcr.io/google_containers/etcd:${KUBEMARK_ETCD_IMAGE} /bin/sh -c "/usr/local/bin/etcd \
--listen-peer-urls http://127.0.0.1:2380 \
--advertise-client-urls=http://127.0.0.1:2379 \
--listen-client-urls=http://0.0.0.0:2379 \
--data-dir=/var/etcd/data ${ETCD_QUOTA_BYTES} 1>> /var/log/etcd.log 2>&1"
ulimit_command='bash -c "ulimit -n 65536;'
cd /
tar xzf kubernetes-server-linux-amd64.tar.gz
write_supervisor_conf "kube-scheduler" "${ulimit_command} /kubernetes/server/bin/kube-scheduler --master=127.0.0.1:8080 $(cat /scheduler_flags | tr '\n' ' ')\""
write_supervisor_conf "kube-apiserver" "${ulimit_command} /kubernetes/server/bin/kube-apiserver --insecure-bind-address=0.0.0.0 \
--etcd-servers=http://127.0.0.1:2379 \
--etcd-servers-overrides=/events#${EVENT_STORE_URL} \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key \
--client-ca-file=/srv/kubernetes/ca.crt \
--token-auth-file=/srv/kubernetes/known_tokens.csv \
--secure-port=443 \
--basic-auth-file=/srv/kubernetes/basic_auth.csv \
--target-ram-mb=$((${NUM_NODES} * 60)) \
$(cat /apiserver_flags | tr '\n' ' ')\""
write_supervisor_conf "kube-contoller-manager" "${ulimit_command} /kubernetes/server/bin/kube-controller-manager \
--master=127.0.0.1:8080 \
--service-account-private-key-file=/srv/kubernetes/server.key \
--root-ca-file=/srv/kubernetes/ca.crt \
$(cat /controllers_flags | tr '\n' ' ')\""
supervisorctl reread
supervisorctl update
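# Block until the apiserver's insecure endpoint (127.0.0.1:8080) reports healthy.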
until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do
sleep 1
done

307
vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh generated vendored Executable file

@@ -0,0 +1,307 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that creates a Kubemark cluster with Master running on GCE.
# Hack to make it work for OS X. Ugh...
TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' ${TMP_ROOT})
source "${KUBE_ROOT}/test/kubemark/common.sh"
function writeEnvironmentFiles() {
cat > "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
${APISERVER_TEST_ARGS}
--storage-backend=${STORAGE_BACKEND}
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}"
EOF
if [ -z "${CUSTOM_ADMISSION_PLUGINS:-}" ]; then
cat >> "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
EOF
else
cat >> "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
--admission-control=${CUSTOM_ADMISSION_PLUGINS}
EOF
fi
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/apiserver_flags"
cat > "${RESOURCE_DIRECTORY}/scheduler_flags" <<EOF
${SCHEDULER_TEST_ARGS}
EOF
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/scheduler_flags"
cat > "${RESOURCE_DIRECTORY}/controllers_flags" <<EOF
${CONTROLLER_MANAGER_TEST_ARGS}
--allocate-node-cidrs="${ALLOCATE_NODE_CIDRS}"
--cluster-cidr="${CLUSTER_IP_RANGE}"
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}"
--terminated-pod-gc-threshold="${TERMINATED_POD_GC_THRESHOLD}"
EOF
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/controllers_flags"
}
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
echo 'Cannot find cmd/kubemark binary'
exit 1
fi
echo "Copying kubemark to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=$(pwd)
cd "${MAKE_DIR}"
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
if ! make; then
if [[ "${attempt}" -eq "${RETRIES}" ]]; then
echo "${color_red}Make failed. Exiting.${color_norm}"
exit 1
fi
echo -e "${color_yellow}Make attempt ${attempt} failed. Retrying.${color_norm}" >&2
sleep $(($attempt * 5))
else
break
fi
done
rm kubemark
cd "${CURR_DIR}"
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
if [ "${EVENT_PD:-false}" == "true" ]; then
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
fi
run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" -q
MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--address "${MASTER_IP}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro,compute-rw,logging-write" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
if [ "${EVENT_PD:-false}" == "true" ]; then
echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--disk "${MASTER_NAME}-event-pd" \
--device-name="master-event-pd"
fi
run-gcloud-compute-with-retries firewall-rules create "${INSTANCE_PREFIX}-kubemark-master-https" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "0.0.0.0/0" \
--target-tags "${MASTER_TAG}" \
--allow "tcp:443"
ensure-temp-dir
gen-kube-bearertoken
create-certs ${MASTER_IP}
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${CA_CERT_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/ca.crt"
echo "${KUBECFG_CERT_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/kubecfg.crt"
echo "${KUBECFG_KEY_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/kubecfg.key"
until gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" --command="ls" &> /dev/null; do
sleep 1
done
password=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
--command="sudo mkdir /srv/kubernetes -p && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /srv/kubernetes/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /srv/kubernetes/server.key\" && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /srv/kubernetes/ca.crt\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /srv/kubernetes/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /srv/kubernetes/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},kubelet,kubelet\" >> /srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${password},admin,admin > /srv/kubernetes/basic_auth.csv\""
writeEnvironmentFiles
gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
"${SERVER_BINARY_TAR}" \
"${KUBEMARK_DIRECTORY}/start-kubemark-master.sh" \
"${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
"${RESOURCE_DIRECTORY}/apiserver_flags" \
"${RESOURCE_DIRECTORY}/scheduler_flags" \
"${RESOURCE_DIRECTORY}/controllers_flags" \
"root@${MASTER_NAME}":/
gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
--command="sudo chmod a+x /configure-kubectl.sh && sudo chmod a+x /start-kubemark-master.sh && \
sudo /start-kubemark-master.sh ${EVENT_STORE_IP:-127.0.0.1} ${NUM_NODES:-0} ${EVENT_PD:-false} ${ETCD_IMAGE:-}"
# create kubeconfig for Kubelet:
KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: "${KUBELET_CERT_BASE64}"
client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context" | base64 | tr -d "\n\r")
KUBECONFIG_SECRET="${RESOURCE_DIRECTORY}/kubeconfig_secret.json"
cat > "${KUBECONFIG_SECRET}" << EOF
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "kubeconfig"
},
"type": "Opaque",
"data": {
"kubeconfig": "${KUBECONFIG_CONTENTS}"
}
}
EOF
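# (For later inspection one could decode the stored kubeconfig with, e.g.:
#   "${KUBECTL}" get secret kubeconfig --namespace=kubemark \
#     -o jsonpath='{.data.kubeconfig}' | base64 --decode
# -- an illustrative check, not used by this script.)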
NODE_CONFIGMAP="${RESOURCE_DIRECTORY}/node_config_map.json"
cat > "${NODE_CONFIGMAP}" << EOF
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "node-configmap"
},
"data": {
"content.type": "${TEST_CLUSTER_API_CONTENT_TYPE}"
}
}
EOF
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: admin
user:
client-certificate-data: "${KUBECFG_CERT_BASE64}"
client-key-data: "${KUBECFG_KEY_BASE64}"
username: admin
password: admin
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: admin
name: kubemark-context
current-context: kubemark-context
EOF
sed "s/##numreplicas##/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/##project##/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
mkdir "${RESOURCE_DIRECTORY}/addons" || true
sed "s/##MASTER_IP##/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/##METRICS_MEM##/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/##EVENTER_MEM##/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
"${KUBECTL}" create -f "${KUBECONFIG_SECRET}" --namespace="kubemark"
"${KUBECTL}" create -f "${NODE_CONFIGMAP}" --namespace="kubemark"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.json" --namespace="kubemark"
rm "${KUBECONFIG_SECRET}"
rm "${NODE_CONFIGMAP}"
echo "Waiting for all HollowNodes to become Running..."
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
echo -n .
sleep 1
now=$(date +%s)
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo "Timeout waiting for all HollowNodes to become Running"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready Nodes while waiting for ${NUM_NODES}."
else
echo "Got error while trying to list Nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
echo "${running} HollowNode pods are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} HollowNode pods are reported as NOT 'Running'"
echo "${pods}" | grep -v "Running"
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo ""
echo "Master IP: ${MASTER_IP}"
echo "Password to kubemark master: ${password}"
echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"

62
vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh generated vendored Executable file

@@ -0,0 +1,62 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that destroys a Kubemark cluster and deletes all GCE resources created for the master.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/test/kubemark/common.sh"
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-kubelet.json" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true
rm -rf "${RESOURCE_DIRECTORY}/addons"
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"
gcloud compute instances delete "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} &> /dev/null || true
gcloud compute addresses delete "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" \
--quiet || true
gcloud compute firewall-rules delete "${INSTANCE_PREFIX}-kubemark-master-https" \
--project "${PROJECT}" \
--quiet || true
if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
gcloud compute instances delete "${EVENT_STORE_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
fi
rm -rf "${RESOURCE_DIRECTORY}/addons" "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" &> /dev/null || true
rm "${RESOURCE_DIRECTORY}/ca.crt" \
"${RESOURCE_DIRECTORY}/kubecfg.crt" \
"${RESOURCE_DIRECTORY}/kubecfg.key" \
"${RESOURCE_DIRECTORY}/hollow-node.json" \
"${RESOURCE_DIRECTORY}/apiserver_flags" \
"${RESOURCE_DIRECTORY}/controllers_flags" \
"${RESOURCE_DIRECTORY}/scheduler_flags" &> /dev/null || true