# This ConfigMap is used to configure a self-hosted Calico installation.
# Documentation: http://docs.projectcalico.org/v1.6/getting-started/kubernetes/installation/hosted/
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  # etcd_endpoints: "$ETCDENDPOINTS"
  etcd_endpoints: "https://etcd1.k8slab.local:2379"
  # Configure the Calico backend to use.
  calico_backend: "none"
  # Config key needed for calico-cni
  cni_network_config: |-
    {
        "name": "calico",
        "type": "flannel",
        "delegate": {
            "type": "calico",
            "etcd_endpoints": "__ETCD_ENDPOINTS__",
            "etcd_key_file": "__ETCD_KEY_FILE__",
            "etcd_cert_file": "__ETCD_CERT_FILE__",
            "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
            "log_level": "info",
            "policy": {
                "type": "k8s",
                "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
            },
            "kubernetes": {
                "kubeconfig": "/etc/kubernetes/cni/net.d/__KUBECONFIG_FILENAME__"
            }
        }
    }
  # You must also populate the Secret below with these files.
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"
---
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # This self-hosted install expects three files with the following names. The values
  # should be base64 encoded strings of the entire contents of each file.
  etcd-key: <BASE64_ENCODE>
  etcd-cert: <BASE64_ENCODE>
  etcd-ca: <BASE64_ENCODE>
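  # A minimal sketch of how these values can be produced, assuming the etcd
  # client credentials live in /etc/kubernetes/ssl as in the install scripts
  # below (the exact file names are illustrative):
  #   etcd-key:  $(base64 -w0 /etc/kubernetes/ssl/etcd-client-key.pem)
  #   etcd-cert: $(base64 -w0 /etc/kubernetes/ssl/etcd-client.pem)
  #   etcd-ca:   $(base64 -w0 /etc/kubernetes/ssl/ca.pem)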
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      hostNetwork: true
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v0.23.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              # value: "none"
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            - name: NO_DEFAULT_POOLS
              value: "true"
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /etc/resolv.conf
              name: dns
              readOnly: true
            - mountPath: /calico-secrets
              name: etcd-certs
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        # Documentation: https://github.com/projectcalico/calico-cni/blob/master/k8s-install/scripts/install-cni.sh
        - name: install-cni
          image: quay.io/calico/cni:v1.5.2
          imagePullPolicy: Always
          command: ["/install-cni.sh"]
          env:
            # We set this to something that won't be read as we are using per
            # host cni conf files
            - name: CNI_CONF_NAME
              value: "10-calico.conf"
            # The directory on the host where CNI networks are installed.
            # This is used for populating absolute paths in the CNI network
            # config to assets which are installed in the CNI network config
            # directory.
            - name: CNI_NET_DIR
              value: "/etc/kubernetes/cni/net.d"
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/kubernetes/cni/net.d
        - name: dns
          hostPath:
            path: /etc/resolv.conf
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:v0.4.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
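# A typical way to load the manifests above into a cluster (the file name
# calico.yaml is illustrative, not part of this gist):
#   kubectl apply -f calico.yaml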
#!/bin/bash
set -e
# List of etcd servers (http://ip:port), comma separated
export ETCD_ENDPOINTS=
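# Example (illustrative, reusing the endpoint from the Calico ConfigMap above):
#   ETCD_ENDPOINTS=https://etcd1.k8slab.local:2379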
# Specify the version (vX.Y.Z) of Kubernetes assets to deploy
export K8S_VER=v1.4.6_coreos.0
# Hyperkube image repository to use.
export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
# The CIDR network to use for pod IPs.
# Each pod launched in the cluster will be assigned an IP out of this range.
# Each node will be configured such that these IPs will be routable using the flannel overlay network.
export POD_NETWORK=10.2.0.0/16
# The CIDR network to use for service cluster IPs.
# Each service will be assigned a cluster IP out of this range.
# This must not overlap with any IP ranges assigned to the POD_NETWORK, or other existing network infrastructure.
# Routing to these IPs is handled by a proxy service local to each node, and are not required to be routable between nodes.
export SERVICE_IP_RANGE=10.3.0.0/24
# The IP address of the Kubernetes API Service
# If the SERVICE_IP_RANGE is changed above, this must be set to the first IP in that range.
export K8S_SERVICE_IP=10.3.0.1
# The IP address of the cluster DNS service.
# This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first IP in the range.
# This same IP must be configured on all worker nodes to enable DNS service discovery.
export DNS_SERVICE_IP=10.3.0.10
# Whether to use Calico for Kubernetes network policy.
export USE_CALICO=false
# Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
export CONTAINER_RUNTIME=docker
# The above settings can optionally be overridden using an environment file:
ENV_FILE=/run/coreos-kubernetes/options.env
# -------------
function init_config {
    local REQUIRED=('ADVERTISE_IP' 'POD_NETWORK' 'ETCD_ENDPOINTS' 'SERVICE_IP_RANGE' 'K8S_SERVICE_IP' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO')
    if [ -f $ENV_FILE ]; then
        export $(cat $ENV_FILE | egrep -v "^\s*(#|$)" | xargs)
    fi
    if [ -z $ADVERTISE_IP ]; then
        export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
    fi
    for REQ in "${REQUIRED[@]}"; do
        if [ -z "$(eval echo \$$REQ)" ]; then
            echo "Missing required config value: ${REQ}"
            exit 1
        fi
    done
}
function create_token() {
    # echo $(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
    echo $(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
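# Example usage (illustrative; nothing in this script calls create_token itself):
#   KUBELET_TOKEN=$(create_token)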
# https://github.com/coreos/bugs/issues/1642
# https://github.com/coreos/bugs/issues/1659
# https://github.com/coreos/bugs/issues/473
function init_flannel {
    echo "Waiting for etcd..."
    while true
    do
        IFS=',' read -ra ES <<< "$ETCD_ENDPOINTS_NETWORK"
        for ETCD in "${ES[@]}"; do
            echo "Trying: $ETCD"
            if [ -n "$(curl --cacert /etc/kubernetes/ssl/ca.pem --key /etc/kubernetes/ssl/etcd-client-key.pem --cert /etc/kubernetes/ssl/etcd-client.pem --silent "$ETCD/v2/machines")" ]; then
                local ACTIVE_ETCD=$ETCD
                break
            fi
            sleep 1
        done
        if [ -n "$ACTIVE_ETCD" ]; then
            break
        fi
    done
    RES=$(curl --silent --cacert /etc/kubernetes/ssl/ca.pem --key /etc/kubernetes/ssl/etcd-client-key.pem --cert /etc/kubernetes/ssl/etcd-client.pem -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
    if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
        echo "Unexpected error configuring flannel pod network: $RES"
    fi
}
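# To inspect the stored flannel config afterwards, something like the
# following should work (illustrative; reuses the same etcd client certs and
# the endpoint from the Calico ConfigMap above):
#   curl --cacert /etc/kubernetes/ssl/ca.pem \
#     --key /etc/kubernetes/ssl/etcd-client-key.pem \
#     --cert /etc/kubernetes/ssl/etcd-client.pem \
#     "https://etcd1.k8slab.local:2379/v2/keys/coreos.com/network/config"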
function init_templates {
    local TEMPLATE=/etc/kubernetes/kubeconfig.yaml
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: http://localhost:8080
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/apiserver.pem
    client-key: /etc/kubernetes/ssl/apiserver-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context
EOF
        chmod 0644 $TEMPLATE
        chown core:core $TEMPLATE
    fi
    local TEMPLATE=/etc/systemd/system/kubelet.service
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Kubernetes Kubelet via Hyperkube ACI
Documentation=http://kubernetes.io/docs/admin/kubelet
Documentation=https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html
[Service]
Environment=KUBELET_VERSION=${K8S_VER}
Environment=KUBELET_ACI=docker://$DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}
Environment="RKT_OPTS=--insecure-options=image \\
  --uuid-file-save=${uuid_file} \\
  --set-env=ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/ca.pem \\
  --set-env=ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd-client.pem \\
  --set-env=ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd-client-key.pem \\
  --volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \\
  --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \\
  --volume modprobe,kind=host,source=/usr/sbin/modprobe \\
  --mount volume=modprobe,target=/usr/sbin/modprobe \\
  --volume lib-modules,kind=host,source=/lib/modules \\
  --mount volume=lib-modules,target=/lib/modules \\
  --volume cni-bin,kind=host,source=/opt/cni/bin \\
  --mount volume=cni-bin,target=/opt/cni/bin \\
  --volume etc-hosts,kind=host,source=/etc/hosts,readOnly=true \\
  --mount volume=etc-hosts,target=/etc/hosts \\
  --volume dns,kind=host,source=/etc/resolv.conf \\
  --mount volume=dns,target=/etc/resolv.conf \\
  --volume rkt,kind=host,source=/opt/bin/host-rkt \\
  --mount volume=rkt,target=/usr/bin/rkt \\
  --volume var-lib-rkt,kind=host,source=/var/lib/rkt \\
  --mount volume=var-lib-rkt,target=/var/lib/rkt \\
  --volume stage,kind=host,source=/tmp \\
  --mount volume=stage,target=/tmp \\
  --volume var-log,kind=host,source=/var/log \\
  --mount volume=var-log,target=/var/log"
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
ExecStart=/usr/lib/coreos/kubelet-wrapper \\
  --api-servers=http://127.0.0.1:8080 \\
  --register-schedulable=false \\
  --cni-conf-dir=/etc/kubernetes/cni/net.d \\
  --network-plugin=cni \\
  --container-runtime=${CONTAINER_RUNTIME} \\
  --rkt-path=/usr/bin/rkt \\
  --rkt-stage1-image=coreos.com/rkt/stage1-coreos \\
  --allow-privileged=true \\
  --pod-manifest-path=/etc/kubernetes/manifests \\
  --hostname-override=${ADVERTISE_IP} \\
  --cluster_dns=${DNS_SERVICE_IP} \\
  --cluster_domain=cluster.local \\
  --node-labels=role=master \\
  --kubeconfig=/etc/kubernetes/kubeconfig.yaml \\
  --require-kubeconfig \\
  --feature-gates=AllAlpha=true
ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
    fi
    local TEMPLATE=/opt/bin/host-rkt
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
#!/bin/sh
# This is bind mounted into the kubelet rootfs and all rkt shell-outs go
# through this rkt wrapper. It essentially enters the host mount namespace
# (which it is already in) only for the purpose of breaking out of the chroot
# before calling rkt. It makes things like rkt gc work and avoids bind mounting
# in certain rkt filesystem dependencies into the kubelet rootfs. This can
# eventually be obviated when the write-api stuff gets upstream and rkt gc is
# through the api-server. Related issue:
# https://github.com/coreos/rkt/issues/2878
exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
EOF
    fi
    local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Before=rkt-api.service
[Service]
RemainAfterExit=yes
Type=oneshot
ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
[Install]
RequiredBy=rkt-api.service
EOF
    fi
    local TEMPLATE=/etc/systemd/system/rkt-api.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Before=kubelet.service
[Service]
ExecStart=/usr/bin/rkt api-service
Restart=always
RestartSec=10
[Install]
RequiredBy=kubelet.service
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-proxy/
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - proxy
    - --master=http://127.0.0.1:8080
    - --proxy-mode=iptables
    - --cluster-cidr=${POD_NETWORK}
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
    - mountPath: /var/run/dbus
      name: dbus
      readOnly: false
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
  - hostPath:
      path: /var/run/dbus
    name: dbus
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-apiserver/
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - apiserver
    - --bind-address=0.0.0.0
    - --etcd-servers=${ETCD_ENDPOINTS}
    - --etcd-cafile=/etc/kubernetes/ssl/ca.pem
    - --etcd-certfile=/etc/kubernetes/ssl/etcd-client.pem
    - --etcd-keyfile=/etc/kubernetes/ssl/etcd-client-key.pem
    - --allow-privileged=true
    - --service-cluster-ip-range=${SERVICE_IP_RANGE}
    - --secure-port=443
    - --advertise-address=${ADVERTISE_IP}
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
    - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --client-ca-file=/etc/kubernetes/ssl/ca.pem
    - --service-account-lookup=true
    - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --authorization-mode=RBAC
    - --authorization-rbac-super-user=admin
    - --runtime-config=extensions/v1beta1=true,rbac.authorization.k8s.io/v1alpha1=true,extensions/v1beta1/networkpolicies=true
    - --feature-gates=AllAlpha=true
    - --cloud-provider=''
    - --v=2
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        port: 8080
        path: /healthz
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
    - containerPort: 8080
      hostPort: 8080
      name: local
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-controller-manager/
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - name: kube-controller-manager
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --root-ca-file=/etc/kubernetes/ssl/ca.pem
    - --feature-gates=AllAlpha=true
    resources:
      requests:
        cpu: 200m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  hostNetwork: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-scheduler/
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --feature-gates=AllAlpha=true
    resources:
      requests:
        cpu: 100m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 15
EOF
    fi
    local TEMPLATE=/etc/flannel/options.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
FLANNELD_IFACE=$private_ipv4
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS_NETWORK
FLANNELD_ETCD_CAFILE=/etc/kubernetes/ssl/ca.pem
FLANNELD_ETCD_CERTFILE=/etc/kubernetes/ssl/etcd-client.pem
FLANNELD_ETCD_KEYFILE=/etc/kubernetes/ssl/etcd-client-key.pem
EOF
    fi
    local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Service]
Environment="ETCD_SSL_DIR=/etc/kubernetes/ssl"
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
TimeoutStartSec=120
EOF
    fi
    local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
    if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
    "name": "podnet",
    "type": "flannel",
    "delegate": {
        "isDefaultGateway": true
    }
}
EOF
    fi
}
function start_addons {
    echo "Waiting for Kubernetes API..."
    until curl --silent "http://127.0.0.1:8080/version"
    do
        sleep 5
    done
    echo
}
function wait_for_apiserver {
    echo "Waiting for Kubernetes API..."
    # wait for the API
    until curl --silent "http://127.0.0.1:8080/version/"
    do
        sleep 5
    done
    echo
}
init_config
init_templates
chmod +x /opt/bin/host-rkt
init_flannel
systemctl daemon-reload
if [ $CONTAINER_RUNTIME = "rkt" ]; then
    systemctl enable load-rkt-stage1
    systemctl enable rkt-api
fi
systemctl enable flanneld; systemctl start flanneld
systemctl enable kubelet; systemctl start kubelet
wait_for_apiserver
echo "DONE"
#!/bin/bash
set -e
# List of etcd servers (http://ip:port), comma separated
export ETCD_ENDPOINTS=
# The endpoint the worker node should use to contact controller nodes (https://ip:port)
# In HA configurations this should be an external DNS record or loadbalancer in front of the control nodes.
# However, it is also possible to point directly to a single control node.
export CONTROLLER_ENDPOINT=
# Specify the version (vX.Y.Z) of Kubernetes assets to deploy
export K8S_VER=v1.4.6_coreos.0
# Hyperkube image repository to use.
export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
# The IP address of the cluster DNS service.
# This must be the same DNS_SERVICE_IP used when configuring the controller nodes.
export DNS_SERVICE_IP=10.3.0.10
# Whether to use Calico for Kubernetes network policy.
export USE_CALICO=false
# Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
export CONTAINER_RUNTIME=docker
# The above settings can optionally be overridden using an environment file:
ENV_FILE=/run/coreos-kubernetes/options.env
# -------------
function init_config {
    local REQUIRED=( 'ADVERTISE_IP' 'ETCD_ENDPOINTS' 'CONTROLLER_ENDPOINT' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO' )
    if [ -f $ENV_FILE ]; then
        export $(cat $ENV_FILE | egrep -v "^\s*(#|$)" | xargs)
    fi
    if [ -z $ADVERTISE_IP ]; then
        export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
    fi
    for REQ in "${REQUIRED[@]}"; do
        if [ -z "$(eval echo \$$REQ)" ]; then
            echo "Missing required config value: ${REQ}"
            exit 1
        fi
    done
}
function init_templates {
    local TEMPLATE=/etc/systemd/system/kubelet.service
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Kubernetes Kubelet via Hyperkube ACI
Documentation=http://kubernetes.io/docs/admin/kubelet
Documentation=https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html
[Service]
Environment=KUBELET_VERSION=${K8S_VER}
Environment=KUBELET_ACI=docker://$DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}
Environment="RKT_OPTS=--insecure-options=image \\
  --uuid-file-save=${uuid_file} \\
  --set-env=ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/ca.pem \\
  --set-env=ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd-client.pem \\
  --set-env=ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd-client-key.pem \\
  --volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \\
  --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \\
  --volume modprobe,kind=host,source=/usr/sbin/modprobe \\
  --mount volume=modprobe,target=/usr/sbin/modprobe \\
  --volume lib-modules,kind=host,source=/lib/modules \\
  --mount volume=lib-modules,target=/lib/modules \\
  --volume cni-bin,kind=host,source=/opt/cni/bin \\
  --mount volume=cni-bin,target=/opt/cni/bin \\
  --volume etc-hosts,kind=host,source=/etc/hosts,readOnly=true \\
  --mount volume=etc-hosts,target=/etc/hosts \\
  --volume dns,kind=host,source=/etc/resolv.conf \\
  --mount volume=dns,target=/etc/resolv.conf \\
  --volume rkt,kind=host,source=/opt/bin/host-rkt \\
  --mount volume=rkt,target=/usr/bin/rkt \\
  --volume var-lib-rkt,kind=host,source=/var/lib/rkt \\
  --mount volume=var-lib-rkt,target=/var/lib/rkt \\
  --volume stage,kind=host,source=/tmp \\
  --mount volume=stage,target=/tmp \\
  --volume var-log,kind=host,source=/var/log \\
  --mount volume=var-log,target=/var/log"
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
ExecStart=/usr/lib/coreos/kubelet-wrapper \\
  --api-servers=${CONTROLLER_ENDPOINT} \\
  --cni-conf-dir=/etc/kubernetes/cni/net.d \\
  --network-plugin=cni \\
  --container-runtime=${CONTAINER_RUNTIME} \\
  --rkt-path=/usr/bin/rkt \\
  --rkt-stage1-image=coreos.com/rkt/stage1-coreos \\
  --register-node=true \\
  --allow-privileged=true \\
  --pod-manifest-path=/etc/kubernetes/manifests \\
  --hostname-override=${ADVERTISE_IP} \\
  --cluster_dns=${DNS_SERVICE_IP} \\
  --cluster_domain=cluster.local \\
  --node-labels=role=worker \\
  --kubeconfig=/etc/kubernetes/kubeconfig.yaml \\
  --tls-cert-file=/etc/kubernetes/ssl/worker.pem \\
  --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \\
  --cloud-provider='' \\
  --feature-gates=AllAlpha=true
ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
    fi
    local TEMPLATE=/opt/bin/host-rkt
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
#!/bin/sh
# This is bind mounted into the kubelet rootfs and all rkt shell-outs go
# through this rkt wrapper. It essentially enters the host mount namespace
# (which it is already in) only for the purpose of breaking out of the chroot
# before calling rkt. It makes things like rkt gc work and avoids bind mounting
# in certain rkt filesystem dependencies into the kubelet rootfs. This can
# eventually be obviated when the write-api stuff gets upstream and rkt gc is
# through the api-server. Related issue:
# https://github.com/coreos/rkt/issues/2878
exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
EOF
    fi
    local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Before=rkt-api.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
[Install]
RequiredBy=rkt-api.service
EOF
    fi
    local TEMPLATE=/etc/systemd/system/rkt-api.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Before=kubelet.service
[Service]
ExecStart=/usr/bin/rkt api-service
Restart=always
RestartSec=10
[Install]
RequiredBy=kubelet.service
EOF
    fi
    local TEMPLATE=/etc/kubernetes/kubeconfig.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
#service-account-context
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: ${CONTROLLER_ENDPOINT}
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/worker.pem
    client-key: /etc/kubernetes/ssl/worker-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - proxy
    - --master=${CONTROLLER_ENDPOINT}
    - --cluster-cidr=${POD_NETWORK}
    - --kubeconfig=/etc/kubernetes/kubeconfig.yaml
    - --proxy-mode=iptables
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: "ssl-certs"
    - mountPath: /etc/kubernetes/kubeconfig.yaml
      name: "kubeconfig"
      readOnly: true
    - mountPath: /etc/kubernetes/ssl
      name: "etc-kube-ssl"
      readOnly: true
    - mountPath: /var/run/dbus
      name: dbus
      readOnly: false
  volumes:
  - name: "ssl-certs"
    hostPath:
      path: "/usr/share/ca-certificates"
  - name: "kubeconfig"
    hostPath:
      path: "/etc/kubernetes/kubeconfig.yaml"
  - name: "etc-kube-ssl"
    hostPath:
      path: "/etc/kubernetes/ssl"
  - hostPath:
      path: /var/run/dbus
    name: dbus
EOF
    fi
    local TEMPLATE=/etc/flannel/options.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
FLANNELD_IFACE=$private_ipv4
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS_NETWORK
FLANNELD_ETCD_CAFILE=/etc/kubernetes/ssl/ca.pem
FLANNELD_ETCD_CERTFILE=/etc/kubernetes/ssl/etcd-client.pem
FLANNELD_ETCD_KEYFILE=/etc/kubernetes/ssl/etcd-client-key.pem
EOF
    fi
    local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Service]
Environment="ETCD_SSL_DIR=/etc/kubernetes/ssl"
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
TimeoutStartSec=120
EOF
    fi
    local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
    if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
    "name": "podnet",
    "type": "flannel",
    "delegate": {
        "isDefaultGateway": true
    }
}
EOF
    fi
}
init_config
init_templates
chmod +x /opt/bin/host-rkt
systemctl daemon-reload
if [ $CONTAINER_RUNTIME = "rkt" ]; then
    systemctl enable load-rkt-stage1
    systemctl enable rkt-api
fi
systemctl enable flanneld; systemctl start flanneld
systemctl enable kubelet; systemctl start kubelet
#!/bin/bash
set -e
# List of etcd servers (http://ip:port), comma separated
export ETCD_ENDPOINTS=
# Specify the version (vX.Y.Z) of Kubernetes assets to deploy
export K8S_VER=v1.4.6_coreos.0
# Hyperkube image repository to use.
export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
# The CIDR network to use for pod IPs.
# Each pod launched in the cluster will be assigned an IP out of this range.
# Each node will be configured such that these IPs will be routable using the flannel overlay network.
export POD_NETWORK=10.2.0.0/16
# The CIDR network to use for service cluster IPs.
# Each service will be assigned a cluster IP out of this range.
# This must not overlap with any IP ranges assigned to the POD_NETWORK, or other existing network infrastructure.
# Routing to these IPs is handled by a proxy service local to each node, and are not required to be routable between nodes.
export SERVICE_IP_RANGE=10.3.0.0/24
# The IP address of the Kubernetes API Service
# If the SERVICE_IP_RANGE is changed above, this must be set to the first IP in that range.
export K8S_SERVICE_IP=10.3.0.1
# The IP address of the cluster DNS service.
# This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first IP in the range.
# This same IP must be configured on all worker nodes to enable DNS service discovery.
export DNS_SERVICE_IP=10.3.0.10
# Whether to use Calico for Kubernetes network policy.
export USE_CALICO=false
# Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
export CONTAINER_RUNTIME=docker
# The above settings can optionally be overridden using an environment file:
ENV_FILE=/run/coreos-kubernetes/options.env
# -------------
function init_config {
    local REQUIRED=('ADVERTISE_IP' 'POD_NETWORK' 'ETCD_ENDPOINTS' 'SERVICE_IP_RANGE' 'K8S_SERVICE_IP' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO')
    if [ -f $ENV_FILE ]; then
        export $(cat $ENV_FILE | egrep -v "^\s*(#|$)" | xargs)
    fi
    if [ -z $ADVERTISE_IP ]; then
        export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
    fi
    for REQ in "${REQUIRED[@]}"; do
        if [ -z "$(eval echo \$$REQ)" ]; then
            echo "Missing required config value: ${REQ}"
            exit 1
        fi
    done
}
function create_token() {
    # echo $(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
    echo $(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
# https://github.com/coreos/bugs/issues/1642
# https://github.com/coreos/bugs/issues/1659
# https://github.com/coreos/bugs/issues/473
function init_flannel {
    echo "Waiting for etcd..."
    while true
    do
        IFS=',' read -ra ES <<< "$ETCD_ENDPOINTS_NETWORK"
        for ETCD in "${ES[@]}"; do
            echo "Trying: $ETCD"
            if [ -n "$(curl --cacert /etc/kubernetes/ssl/ca.pem --key /etc/kubernetes/ssl/etcd-client-key.pem --cert /etc/kubernetes/ssl/etcd-client.pem --silent "$ETCD/v2/machines")" ]; then
                local ACTIVE_ETCD=$ETCD
                break
            fi
            sleep 1
        done
        if [ -n "$ACTIVE_ETCD" ]; then
            break
        fi
    done
    RES=$(curl --silent --cacert /etc/kubernetes/ssl/ca.pem --key /etc/kubernetes/ssl/etcd-client-key.pem --cert /etc/kubernetes/ssl/etcd-client.pem -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
    if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
        echo "Unexpected error configuring flannel pod network: $RES"
    fi
}
function init_templates {
    local TEMPLATE=/etc/kubernetes/apiserver-kubeconfig.yaml
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: http://localhost:8080
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/apiserver.pem
    client-key: /etc/kubernetes/ssl/apiserver-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context
EOF
        chmod 0644 $TEMPLATE
        chown core:core $TEMPLATE
    fi
    local TEMPLATE=/etc/systemd/system/kubelet.service
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Kubernetes Kubelet via Hyperkube ACI
Documentation=http://kubernetes.io/docs/admin/kubelet
Documentation=https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html
[Service]
Environment=KUBELET_VERSION=${K8S_VER}
Environment=KUBELET_ACI=docker://$DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}
Environment="RKT_OPTS=--insecure-options=image \\
  --uuid-file-save=${uuid_file} \\
  --set-env=ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/ca.pem \\
  --set-env=ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd-client.pem \\
  --set-env=ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd-client-key.pem \\
  --volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \\
  --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \\
  --volume modprobe,kind=host,source=/usr/sbin/modprobe \\
  --mount volume=modprobe,target=/usr/sbin/modprobe \\
  --volume lib-modules,kind=host,source=/lib/modules \\
  --mount volume=lib-modules,target=/lib/modules \\
  --volume etc-hosts,kind=host,source=/etc/hosts,readOnly=true \\
  --mount volume=etc-hosts,target=/etc/hosts \\
  --volume dns,kind=host,source=/etc/resolv.conf \\
  --mount volume=dns,target=/etc/resolv.conf \\
  --volume rkt,kind=host,source=/opt/bin/host-rkt \\
  --mount volume=rkt,target=/usr/bin/rkt \\
  --volume var-lib-rkt,kind=host,source=/var/lib/rkt \\
  --mount volume=var-lib-rkt,target=/var/lib/rkt \\
  --volume stage,kind=host,source=/tmp \\
  --mount volume=stage,target=/tmp \\
  --volume var-log,kind=host,source=/var/log \\
  --mount volume=var-log,target=/var/log"
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
ExecStart=/usr/lib/coreos/kubelet-wrapper \\
  --api-servers=http://127.0.0.1:8080 \\
  --register-schedulable=false \\
  --cni-conf-dir=/etc/kubernetes/cni/net.d \\
  --network-plugin=cni \\
  --container-runtime=${CONTAINER_RUNTIME} \\
  --rkt-path=/usr/bin/rkt \\
  --rkt-stage1-image=coreos.com/rkt/stage1-coreos \\
  --allow-privileged=true \\
  --pod-manifest-path=/etc/kubernetes/manifests \\
  --hostname-override=${ADVERTISE_IP} \\
  --cluster_dns=${DNS_SERVICE_IP} \\
  --cluster_domain=cluster.local \\
  --node-labels=role=master \\
  --kubeconfig=/etc/kubernetes/apiserver-kubeconfig.yaml \\
  --require-kubeconfig \\
  --feature-gates=AllAlpha=true
ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
    fi
    local TEMPLATE=/opt/bin/host-rkt
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
#!/bin/sh
# This is bind mounted into the kubelet rootfs and all rkt shell-outs go
# through this rkt wrapper. It essentially enters the host mount namespace
# (which it is already in) only for the purpose of breaking out of the chroot
# before calling rkt. It makes things like rkt gc work and avoids bind mounting
# in certain rkt filesystem dependencies into the kubelet rootfs. This can
# eventually be obviated when the write-api stuff gets upstream and rkt gc is
# through the api-server. Related issue:
# https://github.com/coreos/rkt/issues/2878
exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
EOF
    fi
    local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Before=rkt-api.service
[Service]
RemainAfterExit=yes
Type=oneshot
ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
[Install]
RequiredBy=rkt-api.service
EOF
    fi
    local TEMPLATE=/etc/systemd/system/rkt-api.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Before=kubelet.service
[Service]
ExecStart=/usr/bin/rkt api-service
Restart=always
RestartSec=10
[Install]
RequiredBy=kubelet.service
EOF
    fi
    local TEMPLATE=/etc/systemd/system/calico-node.service
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Calico virtual networking and network security per-host agent
Requires=network-online.target
After=network-online.target
Documentation=http://docs.projectcalico.org/v2.0/usage/configuration/
[Service]
Slice=machine.slice
Environment=CALICO_DISABLE_FILE_LOGGING=true
Environment=HOSTNAME=${ADVERTISE_IP}
Environment=IP=${ADVERTISE_IP}
Environment=FELIX_FELIXHOSTNAME=${ADVERTISE_IP}
Environment=CALICO_NETWORKING=false
Environment=NO_DEFAULT_POOLS=true
Environment=ETCD_CA_CERT_FILE=/etc/etcd2/ssl/ca.pem
Environment=ETCD_CERT_FILE=/etc/etcd2/ssl/etcd-client.pem
Environment=ETCD_KEY_FILE=/etc/etcd2/ssl/etcd-client-key.pem
Environment=ETCD_ENDPOINTS=${ETCD_ENDPOINTS_NETWORK}
ExecStart=/usr/bin/rkt run --inherit-env \\
  --stage1-from-dir=stage1-fly.aci \\
  --volume=modules,kind=host,source=/lib/modules,readOnly=false \\
  --mount=volume=modules,target=/lib/modules \\
  --volume=dns,kind=host,source=/etc/resolv.conf,readOnly=true \\
  --mount=volume=dns,target=/etc/resolv.conf \\
  --volume=hosts,kind=host,source=/etc/hosts,readOnly=true \\
  --mount volume=hosts,target=/etc/hosts \\
  --volume=ssl,kind=host,source=/etc/kubernetes/ssl,readOnly=true \\
  --mount=volume=ssl,target=/etc/etcd2/ssl \\
  --trust-keys-from-https ${CALICO_NODE_IMAGE}
KillMode=mixed
Restart=always
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-proxy/
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - proxy
    - --master=http://127.0.0.1:8080
    - --proxy-mode=iptables
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
    - mountPath: /var/run/dbus
      name: dbus
      readOnly: false
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
  - hostPath:
      path: /var/run/dbus
    name: dbus
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-apiserver/
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - apiserver
    - --bind-address=0.0.0.0
    - --etcd-servers=${ETCD_ENDPOINTS}
    - --etcd-cafile=/etc/kubernetes/ssl/ca.pem
    - --etcd-certfile=/etc/kubernetes/ssl/etcd-client.pem
    - --etcd-keyfile=/etc/kubernetes/ssl/etcd-client-key.pem
    - --allow-privileged=true
    - --service-cluster-ip-range=${SERVICE_IP_RANGE}
    - --secure-port=443
    - --advertise-address=${ADVERTISE_IP}
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
    - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --client-ca-file=/etc/kubernetes/ssl/ca.pem
    - --service-account-lookup=true
    - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --authorization-mode=RBAC
    - --authorization-rbac-super-user=admin
    - --runtime-config=extensions/v1beta1=true,rbac.authorization.k8s.io/v1alpha1=true,extensions/v1beta1/networkpolicies=true
    - --feature-gates=AllAlpha=true
    - --cloud-provider=''
    - --v=2
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        port: 8080
        path: /healthz
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
    - containerPort: 8080
      hostPort: 8080
      name: local
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-controller-manager/
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - name: kube-controller-manager
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --root-ca-file=/etc/kubernetes/ssl/ca.pem
    - --node-monitor-period=2s
    - --node-monitor-grace-period=20s
    - --resource-quota-sync-period=1m0s
    - --horizontal-pod-autoscaler-sync-period=10s
    - --deleting-pods-burst=10
    - --namespace-sync-period=1m0s
    - --concurrent-deployment-syncs=20
    - --concurrent-endpoint-syncs=20
    - --concurrent-replicaset-syncs=40
    - --pod-eviction-timeout=30s
    - --feature-gates=AllAlpha=true
    resources:
      requests:
        cpu: 200m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  hostNetwork: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/admin/kube-scheduler/
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --feature-gates=AllAlpha=true
    resources:
      requests:
        cpu: 100m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 15
EOF
    fi
    local TEMPLATE=/etc/kubernetes/manifests/calico-policy-controller.yaml
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
# Documentation: http://kubernetes.io/docs/user-guide/networkpolicies/
apiVersion: v1
kind: Pod
metadata:
  name: calico-policy-controller
  namespace: calico-system
spec:
  hostNetwork: true
  containers:
  # The Calico policy controller.
  - name: kube-policy-controller
    image: $DOCKER_BOOTSTRAP_REPO/${POLICY_CONTROLLER_IMAGE}
    env:
    - name: ETCD_CA_CERT_FILE
      value: "/etc/etcd2/ssl/ca.pem"
    - name: ETCD_CERT_FILE
      value: "/etc/etcd2/ssl/etcd-client.pem"
    - name: ETCD_KEY_FILE
      value: "/etc/etcd2/ssl/etcd-client-key.pem"
    - name: ETCD_ENDPOINTS
      value: "${ETCD_ENDPOINTS_NETWORK}"
    - name: K8S_API
      value: "http://127.0.0.1:8080"
    - name: LEADER_ELECTION
      value: "true"
    volumeMounts:
    - mountPath: /etc/etcd2/ssl
      name: ssl
  # Leader election container used by the policy controller.
  - name: leader-elector
    image: $DOCKER_BOOTSTRAP_REPO/${POLICY_LEADER_ELECTOR_IMAGE}
    imagePullPolicy: IfNotPresent
    args:
    - "--election=calico-policy-election"
    - "--election-namespace=calico-system"
    - "--http=127.0.0.1:4040"
  volumes:
  - name: ssl
    hostPath:
      path: /etc/kubernetes/ssl
EOF
    fi
    local TEMPLATE=/srv/kubernetes/manifests/calico-system.json
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
    "apiVersion": "v1",
    "kind": "Namespace",
    "metadata": {
        "name": "calico-system"
    }
}
EOF
    fi
    local TEMPLATE=/etc/flannel/options.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
FLANNELD_IFACE=$private_ipv4
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS_NETWORK
FLANNELD_ETCD_CAFILE=/etc/kubernetes/ssl/ca.pem
FLANNELD_ETCD_CERTFILE=/etc/kubernetes/ssl/etcd-client.pem
FLANNELD_ETCD_KEYFILE=/etc/kubernetes/ssl/etcd-client-key.pem
EOF
    fi
    local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Service]
Environment="ETCD_SSL_DIR=/etc/kubernetes/ssl"
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
TimeoutStartSec=120
EOF
    fi
    local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/net.d/10-calico.conf
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
    "name": "calico",
    "type": "flannel",
    "delegate": {
        "type": "calico",
        "etcd_endpoints": "$ETCD_ENDPOINTS_NETWORK",
        "log_level": "none",
        "log_level_stderr": "info",
        "hostname": "${ADVERTISE_IP}",
        "policy": {
            "type": "k8s",
            "k8s_api_root": "http://127.0.0.1:8080/api/v1/"
        }
    }
}
EOF
    fi
    local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
    if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
    "name": "podnet",
    "type": "flannel",
    "delegate": {
        "isDefaultGateway": true
    }
}
EOF
    fi
}
function start_addons {
    echo "Waiting for Kubernetes API..."
    until curl --silent "http://127.0.0.1:8080/version"
    do
        sleep 5
    done
    echo
}
function enable_calico_policy {
    echo "Waiting for Kubernetes API..."
    until curl --silent "http://127.0.0.1:8080/version"
    do
        sleep 5
    done
    echo
    echo "K8S: Calico Policy"
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/calico-system.json)" "http://127.0.0.1:8080/api/v1/namespaces/" > /dev/null
}
init_config
init_templates
chmod +x /opt/bin/host-rkt
init_flannel
systemctl daemon-reload
if [ $CONTAINER_RUNTIME = "rkt" ]; then
    systemctl enable load-rkt-stage1
    systemctl enable rkt-api
fi
systemctl enable flanneld; systemctl start flanneld
systemctl enable kubelet; systemctl start kubelet
if [ $USE_CALICO = "true" ]; then
    systemctl enable calico-node; systemctl start calico-node
    enable_calico_policy
fi
start_addons
echo "DONE"
#!/bin/bash
set -e
# List of etcd servers (http://ip:port), comma separated
export ETCD_ENDPOINTS=
# The endpoint the worker node should use to contact controller nodes (https://ip:port)
# In HA configurations this should be an external DNS record or loadbalancer in front of the control nodes.
# However, it is also possible to point directly to a single control node.
export CONTROLLER_ENDPOINT=
# Specify the version (vX.Y.Z) of Kubernetes assets to deploy
export K8S_VER=v1.4.6_coreos.0
# Hyperkube image repository to use.
export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
# The IP address of the cluster DNS service.
# This must be the same DNS_SERVICE_IP used when configuring the controller nodes.
export DNS_SERVICE_IP=10.3.0.10
# Whether to use Calico for Kubernetes network policy.
export USE_CALICO=false
# Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
export CONTAINER_RUNTIME=docker
# The above settings can optionally be overridden using an environment file:
ENV_FILE=/run/coreos-kubernetes/options.env
# -------------
function init_config {
    local REQUIRED=( 'ADVERTISE_IP' 'ETCD_ENDPOINTS' 'CONTROLLER_ENDPOINT' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO' )
    if [ -f $ENV_FILE ]; then
        export $(cat $ENV_FILE | egrep -v "^\s*(#|$)" | xargs)
    fi
    if [ -z $ADVERTISE_IP ]; then
        export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
    fi
    for REQ in "${REQUIRED[@]}"; do
        if [ -z "$(eval echo \$$REQ)" ]; then
            echo "Missing required config value: ${REQ}"
            exit 1
        fi
    done
}
function init_templates {
    local TEMPLATE=/etc/systemd/system/kubelet.service
    local uuid_file="/var/run/kubelet-pod.uuid"
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Kubernetes Kubelet via Hyperkube ACI
Documentation=http://kubernetes.io/docs/admin/kubelet
Documentation=https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html
[Service]
Environment=KUBELET_VERSION=${K8S_VER}
Environment=KUBELET_ACI=docker://$DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}
Environment="RKT_OPTS=--insecure-options=image \\
  --uuid-file-save=${uuid_file} \\
  --set-env=ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/ca.pem \\
  --set-env=ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd-client.pem \\
  --set-env=ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd-client-key.pem \\
  --volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \\
  --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \\
  --volume modprobe,kind=host,source=/usr/sbin/modprobe \\
  --mount volume=modprobe,target=/usr/sbin/modprobe \\
  --volume lib-modules,kind=host,source=/lib/modules \\
  --mount volume=lib-modules,target=/lib/modules \\
  --volume etc-hosts,kind=host,source=/etc/hosts,readOnly=true \\
  --mount volume=etc-hosts,target=/etc/hosts \\
  --volume dns,kind=host,source=/etc/resolv.conf \\
  --mount volume=dns,target=/etc/resolv.conf \\
  --volume rkt,kind=host,source=/opt/bin/host-rkt \\
  --mount volume=rkt,target=/usr/bin/rkt \\
  --volume var-lib-rkt,kind=host,source=/var/lib/rkt \\
  --mount volume=var-lib-rkt,target=/var/lib/rkt \\
  --volume stage,kind=host,source=/tmp \\
  --mount volume=stage,target=/tmp \\
  --volume var-log,kind=host,source=/var/log \\
  --mount volume=var-log,target=/var/log"
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
ExecStart=/usr/lib/coreos/kubelet-wrapper \\
  --api-servers=${CONTROLLER_ENDPOINT} \\
  --cni-conf-dir=/etc/kubernetes/cni/net.d \\
  --network-plugin=cni \\
  --container-runtime=${CONTAINER_RUNTIME} \\
  --rkt-path=/usr/bin/rkt \\
  --rkt-stage1-image=coreos.com/rkt/stage1-coreos \\
  --register-node=true \\
  --allow-privileged=true \\
  --pod-manifest-path=/etc/kubernetes/manifests \\
  --hostname-override=${ADVERTISE_IP} \\
  --cluster_dns=${DNS_SERVICE_IP} \\
  --cluster_domain=cluster.local \\
  --node-labels=role=worker \\
  --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \\
  --tls-cert-file=/etc/kubernetes/ssl/worker.pem \\
  --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \\
  --cloud-provider='' \\
  --feature-gates=AllAlpha=true
ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
    fi
local TEMPLATE=/opt/bin/host-rkt
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
#!/bin/sh
# This is bind mounted into the kubelet rootfs and all rkt shell-outs go
# through this rkt wrapper. It essentially enters the host mount namespace
# (which it is already in) only for the purpose of breaking out of the chroot
# before calling rkt. It makes things like rkt gc work and avoids bind-mounting
# certain rkt filesystem dependencies into the kubelet rootfs. This can
# eventually be obviated once the write-api work lands upstream and rkt gc goes
# through the api-server. Related issue:
# https://github.com/coreos/rkt/issues/2878
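# nsenter enters the mount, UTS, IPC, network and PID namespaces (-m -u -i -n -p)
# of PID 1 (-t 1), i.e. the host's init, before exec'ing the real rkt binary.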
exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
EOF
fi
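# The kubelet sees this wrapper as /usr/bin/rkt through the "rkt" volume/mount
# pair declared in RKT_OPTS above, so every rkt shell-out is routed through it.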
local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Before=rkt-api.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
[Install]
RequiredBy=rkt-api.service
EOF
fi
local TEMPLATE=/etc/systemd/system/rkt-api.service
if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
[Unit]
Before=kubelet.service
[Service]
ExecStart=/usr/bin/rkt api-service
Restart=always
RestartSec=10
[Install]
RequiredBy=kubelet.service
EOF
fi
local TEMPLATE=/etc/systemd/system/calico-node.service
if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
[Unit]
Description=Calico virtual networking and network security per-host agent
Requires=network-online.target
After=network-online.target
Documentation=http://docs.projectcalico.org/v2.0/usage/configuration/
[Service]
Slice=machine.slice
Environment=CALICO_DISABLE_FILE_LOGGING=true
Environment=HOSTNAME=${ADVERTISE_IP}
Environment=IP=${ADVERTISE_IP}
Environment=FELIX_FELIXHOSTNAME=${ADVERTISE_IP}
Environment=CALICO_NETWORKING=false
Environment=NO_DEFAULT_POOLS=true
Environment=ETCD_CA_CERT_FILE=/etc/etcd2/ssl/ca.pem
Environment=ETCD_CERT_FILE=/etc/etcd2/ssl/etcd-client.pem
Environment=ETCD_KEY_FILE=/etc/etcd2/ssl/etcd-client-key.pem
Environment=ETCD_ENDPOINTS=${ETCD_ENDPOINTS_NETWORK}
ExecStart=/usr/bin/rkt run --inherit-env \\
--stage1-from-dir=stage1-fly.aci \\
--volume=modules,kind=host,source=/lib/modules,readOnly=false \\
--mount=volume=modules,target=/lib/modules \\
--volume=dns,kind=host,source=/etc/resolv.conf,readOnly=true \\
--mount=volume=dns,target=/etc/resolv.conf \\
--volume=hosts,kind=host,source=/etc/hosts,readOnly=true \\
--mount=volume=hosts,target=/etc/hosts \\
--volume=ssl,kind=host,source=/etc/kubernetes/ssl,readOnly=true \\
--mount=volume=ssl,target=/etc/etcd2/ssl \\
--trust-keys-from-https ${CALICO_NODE_IMAGE}
KillMode=mixed
Restart=always
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target
EOF
fi
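# Note: stage1-fly runs calico-node in a simple chroot instead of a container
# pod, giving it the host-level access it needs to program routes and iptables.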
local TEMPLATE=/etc/kubernetes/worker-kubeconfig.yaml
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
apiVersion: v1
kind: Config
clusters:
- name: local
cluster:
server: ${CONTROLLER_ENDPOINT}
certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
user:
client-certificate: /etc/kubernetes/ssl/worker.pem
client-key: /etc/kubernetes/ssl/worker-key.pem
contexts:
- context:
cluster: local
user: kubelet
name: kubelet-context
current-context: kubelet-context
EOF
fi
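# This kubeconfig is shared by the kubelet (--kubeconfig above) and by the
# kube-proxy manifest below; both authenticate with the worker client cert.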
local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
annotations:
rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
spec:
hostNetwork: true
containers:
- name: kube-proxy
image: $DOCKER_BOOTSTRAP_REPO/${HYPERKUBE_IMAGE_REPO}:${K8S_VER}
command:
- /hyperkube
- proxy
- --master=${CONTROLLER_ENDPOINT}
- --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
- --proxy-mode=iptables
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: "ssl-certs"
- mountPath: /etc/kubernetes/worker-kubeconfig.yaml
name: "kubeconfig"
readOnly: true
- mountPath: /etc/kubernetes/ssl
name: "etc-kube-ssl"
readOnly: true
- mountPath: /var/run/dbus
name: dbus
readOnly: false
volumes:
- name: "ssl-certs"
hostPath:
path: "/usr/share/ca-certificates"
- name: "kubeconfig"
hostPath:
path: "/etc/kubernetes/worker-kubeconfig.yaml"
- name: "etc-kube-ssl"
hostPath:
path: "/etc/kubernetes/ssl"
- hostPath:
path: /var/run/dbus
name: dbus
EOF
fi
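# The stage1-name-override annotation above makes rkt launch kube-proxy with
# the fly stage1 (a chroot), so it can manage iptables rules on the host.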
local TEMPLATE=/etc/flannel/options.env
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
FLANNELD_IFACE=$private_ipv4
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS_NETWORK
FLANNELD_ETCD_CAFILE=/etc/kubernetes/ssl/ca.pem
FLANNELD_ETCD_CERTFILE=/etc/kubernetes/ssl/etcd-client.pem
FLANNELD_ETCD_KEYFILE=/etc/kubernetes/ssl/etcd-client-key.pem
EOF
fi
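# flanneld does not read this file in place; the drop-in below symlinks it to
# /run/flannel/options.env, which the flanneld unit loads at start-up.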
local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
[Service]
Environment="ETCD_SSL_DIR=/etc/kubernetes/ssl"
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
TimeoutStartSec=120
EOF
fi
local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
fi
local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
if [ ! -f $TEMPLATE ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
fi
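# The empty values intentionally clear Docker's --bip and --ip-masq options:
# pod networking is handled by flannel/CNI, so Docker must not configure its
# own bridge IP or masquerade rules.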
local TEMPLATE=/etc/kubernetes/cni/net.d/10-calico.conf
if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
{
"name": "calico",
"type": "flannel",
"delegate": {
"type": "calico",
"etcd_endpoints": "$ETCD_ENDPOINTS_NETWORK",
"log_level": "none",
"log_level_stderr": "info",
"hostname": "${ADVERTISE_IP}",
"policy": {
"type": "k8s",
"k8s_api_root": "${CONTROLLER_ENDPOINT}/api/v1/",
"k8s_client_key": "/etc/kubernetes/ssl/worker-key.pem",
"k8s_client_certificate": "/etc/kubernetes/ssl/worker.pem"
}
}
}
EOF
fi
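# With this config the flannel CNI plugin supplies pod IPs from the flannel
# subnet and delegates interface setup and policy enforcement to Calico.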
local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname $TEMPLATE)
cat << EOF > $TEMPLATE
{
"name": "podnet",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
EOF
fi
}
init_config
init_templates
chmod +x /opt/bin/host-rkt
systemctl daemon-reload
if [ "$CONTAINER_RUNTIME" = "rkt" ]; then
systemctl enable load-rkt-stage1
systemctl enable rkt-api
fi
systemctl enable flanneld; systemctl start flanneld
systemctl enable kubelet; systemctl start kubelet
if [ "$USE_CALICO" = "true" ]; then
systemctl enable calico-node; systemctl start calico-node
fi