Basic HA kubernetes deployment (#7)
* remove old files
* sketch of non-bootkube genesis
* add basic chroot/bootstrap script
* cleanup kubectl/kubelet fetching
* fix cni bin asset path
* add non-pod asset loader
* add example ca
* refactor key gen/distribution
* flannel up on genesis
* refactor some code toward join
* WIP: last commit working on "self-hosted, helm-managed"
* first pass at consolidating config for vanilla deploy
* refactor cli a bit
* use provided cluster ca
* separate genesis and join scripts
* add basic etcd joining
* actually run the proxy everywhere
* update readme
* enable kubelet service
* add pki most places
* use consistent sa keypair
* use quay.io/attcomdev/promenade
* fix typo in n3
* tls everywhere in kubernetes
* tls for etcd
* remove currently unused files
This commit is contained in: parent `41ceee7e19`, commit `fce98459a6`.
```
@@ -1,2 +1,7 @@
Makefile
promenade-*.tar
__pycache__
Vagrantfile
.vagrant
promenade.tar
promenade.egg-info
example
setup.sh
```
.gitignore (vendored): 4 lines changed
```
@@ -1,10 +1,14 @@
__pycache__
/*.log
/*.tar
/.vagrant
/cni.tgz
/env.sh
/helm
/kubectl
/kubelet
/linux-amd64
/genesis_image_cache/
/join_image_cache/
/promenade.egg-info
/tmp
```
Dockerfile (new file): 59 lines
@@ -0,0 +1,59 @@
```dockerfile
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM python:3.6

ENV CNI_VERSION=v0.5.2 \
    HELM_VERSION=v2.4.2 \
    KUBECTL_VERSION=v1.6.2 \
    KUBELET_VERSION=v1.6.2

VOLUME /etc/promenade
VOLUME /target

RUN mkdir /promenade
WORKDIR /promenade

RUN set -ex \
    && export BIN_DIR=/assets/usr/local/bin \
    && mkdir -p $BIN_DIR \
    && curl -sLo $BIN_DIR/kubelet http://storage.googleapis.com/kubernetes-release/release/$KUBELET_VERSION/bin/linux/amd64/kubelet \
    && curl -sLo $BIN_DIR/kubectl http://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl \
    && chmod 555 $BIN_DIR/kubelet \
    && chmod 555 $BIN_DIR/kubectl \
    && mkdir -p /assets/opt/cni/bin \
    && curl -sL https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz | tar -zxv -C /assets/opt/cni/bin/ \
    && curl -sL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv -C /tmp linux-amd64/helm \
    && mv /tmp/linux-amd64/helm $BIN_DIR/helm \
    && chmod 555 $BIN_DIR/helm

RUN set -ex \
    && apt-get update -qq \
    && apt-get install --no-install-recommends -y \
        libyaml-dev \
        openssl \
        rsync \
    && rm -rf /var/lib/apt/lists/*

RUN set -ex \
    && curl -sLo /usr/local/bin/cfssl https://pkg.cfssl.org/R1.1/cfssl_linux-amd64 \
    && chmod 555 /usr/local/bin/cfssl \
    && curl -sLo /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.1/cfssljson_linux-amd64 \
    && chmod 555 /usr/local/bin/cfssljson

COPY requirements-frozen.txt /promenade
RUN pip install --no-cache-dir -r requirements-frozen.txt

COPY . /promenade
RUN pip install -e /promenade
```
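As a quick smoke test of this new image, a minimal sketch that builds it and checks the binaries staged under `/assets` (the tag matches the README example later in this commit; the test itself is not part of the commit):

```bash
# Build from the repository root; tag follows the README example below.
docker build -t quay.io/attcomdev/promenade:experimental .

# Verify the kubectl/kubelet and CNI binaries fetched by the RUN steps above.
docker run --rm quay.io/attcomdev/promenade:experimental \
    /assets/usr/local/bin/kubectl version --client
docker run --rm quay.io/attcomdev/promenade:experimental \
    ls /assets/opt/cni/bin
```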
@@ -1,37 +0,0 @@
```dockerfile
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:xenial

ENV NODE_HOSTNAME=

RUN apt-get update -qq \
    && apt-get install --no-install-recommends -y \
        docker.io \
        gettext-base \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir /promenade \
    && mkdir /promenade/assets \
    && mkdir /promenade/scripts

WORKDIR /promenade

ENTRYPOINT /promenade/scripts/entrypoint.sh

COPY genesis_image_cache/* cni.tgz helm kubelet /promenade/

COPY kubelet.service.template /promenade/
COPY env.sh scripts/common/* /promenade/scripts/
COPY scripts/entrypoint-genesis.sh /promenade/scripts/entrypoint.sh
COPY assets/ /promenade/assets/
```
@@ -1,37 +0,0 @@
```dockerfile
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:xenial

ENV NODE_HOSTNAME=

RUN apt-get update -qq \
    && apt-get install --no-install-recommends -y \
        docker.io \
        gettext-base \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir /promenade \
    && mkdir /promenade/assets \
    && mkdir /promenade/scripts

WORKDIR /promenade

ENTRYPOINT /promenade/scripts/entrypoint.sh

COPY join_image_cache/* cni.tgz kubelet /promenade/

COPY kubelet.service.template /promenade/
COPY env.sh scripts/common/* /promenade/scripts/
COPY scripts/entrypoint-join.sh /promenade/scripts/entrypoint.sh
COPY assets/kubeconfig assets/auth/kubeconfig /promenade/assets/
```
Makefile (deleted): 161 lines
@@ -1,161 +0,0 @@
```makefile
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#---------------#
# Configuration #
#---------------#
BOOTKUBE_VERSION := v0.4.1
CNI_VERSION := v0.5.2
HELM_VERSION := v2.3.1
KUBERNETES_VERSION := v1.6.2

NAMESPACE := quay.io/attcomdev
GENESIS_REPO := promenade-genesis
JOIN_REPO := promenade-join
TAG := dev

# PreFetch Images for Offline deployment
PREFETCH_IMAGES := false

GENESIS_IMAGES := \
	gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \
	gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \
	gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \
	gcr.io/google_containers/pause-amd64:3.0 \
	quay.io/calico/cni:v1.7.0 \
	quay.io/calico/kube-policy-controller:v0.5.4 \
	quay.io/calico/node:v1.1.3 \
	quay.io/coreos/bootkube:$(BOOTKUBE_VERSION) \
	quay.io/coreos/etcd-operator:v0.2.5 \
	quay.io/coreos/etcd:v3.1.4 \
	quay.io/coreos/etcd:v3.1.6 \
	quay.io/coreos/flannel:v0.7.1 \
	quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \
	quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \
	quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \

JOIN_IMAGES := \
	gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \
	gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \
	gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \
	gcr.io/google_containers/pause-amd64:3.0 \
	quay.io/calico/cni:v1.7.0 \
	quay.io/calico/kube-policy-controller:v0.5.4 \
	quay.io/calico/node:v1.1.3 \
	quay.io/coreos/etcd-operator:v0.2.5 \
	quay.io/coreos/etcd:v3.1.4 \
	quay.io/coreos/etcd:v3.1.6 \
	quay.io/coreos/flannel:v0.7.1 \
	quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \
	quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \
	quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \

# Build Dependencies online vs offline
GENESIS_BUILD_DEPS := Dockerfile.genesis cni.tgz env.sh helm kubelet kubelet.service.template

ifeq ($(PREFETCH_IMAGES), true)
GENESIS_BUILD_DEPS += genesis_image_cache/genesis-images.tar
endif

JOIN_BUILD_DEPS := Dockerfile.join kubelet.service.template

ifeq ($(PREFETCH_IMAGES), true)
JOIN_BUILD_DEPS += join_image_cache/join-images.tar
endif

#-------#
# Rules #
#-------#
all: build

build: build-genesis build-join

push: push-genesis push-join

save: save-genesis save-join

genesis: build-genesis

build-genesis: $(GENESIS_BUILD_DEPS)
	sudo docker build -f Dockerfile.genesis -t $(NAMESPACE)/$(GENESIS_REPO):$(TAG) .

push-genesis: build-genesis
	sudo docker push $(NAMESPACE)/$(GENESIS_REPO):$(TAG)

save-genesis: build-genesis
	sudo docker save $(NAMESPACE)/$(GENESIS_REPO):$(TAG) > promenade-genesis.tar

join: build-join

build-join: $(JOIN_BUILD_DEPS)
	sudo docker build -f Dockerfile.join -t $(NAMESPACE)/$(JOIN_REPO):$(TAG) .

push-join: build-join
	sudo docker push $(NAMESPACE)/$(JOIN_REPO):$(TAG)

save-join: build-join
	sudo docker save $(NAMESPACE)/$(JOIN_REPO):$(TAG) > promenade-join.tar

cni.tgz:
	curl -Lo cni.tgz https://github.com/containernetworking/cni/releases/download/$(CNI_VERSION)/cni-amd64-$(CNI_VERSION).tgz

env.sh: Makefile
	rm -f env.sh
	echo export BOOTKUBE_VERSION=$(BOOTKUBE_VERSION) >> env.sh
	echo export CNI_VERSION=$(CNI_VERSION) >> env.sh
	echo export HELM_VERSION=$(HELM_VERSION) >> env.sh
	echo export KUBERNETES_VERSION=$(KUBERNETES_VERSION) >> env.sh

helm:
	curl -Lo helm.tgz https://storage.googleapis.com/kubernetes-helm/helm-$(HELM_VERSION)-linux-amd64.tar.gz
	tar xf helm.tgz
	mv linux-amd64/helm ./helm
	rm -rf ./linux-amd64/
	rm -f helm.tgz
	chmod +x helm

genesis_image_cache/genesis-images.tar:
	for IMAGE in $(GENESIS_IMAGES); do \
		sudo docker pull $$IMAGE; \
	done
	mkdir genesis_image_cache
	sudo docker save -o genesis_image_cache/genesis-images.tar $(GENESIS_IMAGES)

join_image_cache/join-images.tar:
	for IMAGE in $(JOIN_IMAGES); do \
		sudo docker pull $$IMAGE; \
	done
	mkdir join_image_cache
	sudo docker save -o join_image_cache/join-images.tar $(JOIN_IMAGES)

kubelet:
	curl -LO http://storage.googleapis.com/kubernetes-release/release/$(KUBERNETES_VERSION)/bin/linux/amd64/kubelet
	chmod +x kubelet

clean:
	rm -rf \
		*.tar \
		cni.tgz \
		env.sh \
		helm \
		helm.tgz \
		kubelet \
		linux-amd64 \
		genesis_image_cache \
		join_image_cache \

.PHONY : build build-genesis build-join clean genesis join push push-genesis push-join
```
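For reference, a sketch of the offline workflow this deleted Makefile supported: build with prefetched image caches, save tarballs, and load them on an air-gapped host without a registry (the target host `n0` is a placeholder, not part of the commit):

```bash
# Build both images with the image caches baked in, then save them to tars.
make PREFETCH_IMAGES=true save

# Ship a tar to the target host and load it there.
scp promenade-genesis.tar n0:/tmp/           # 'n0' is a placeholder host
ssh n0 'sudo docker load -i /tmp/promenade-genesis.tar'
```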
README.md: 70 lines changed
@@ -1,81 +1,39 @@

# Overview

Promenade is a tool for deploying self-hosted, highly resilient Kubernetes clusters using
[bootkube](https://github.com/kubernetes-incubator/bootkube). Currently, Promenade works by leveraging Docker containers with the Bootkube binaries in order to set up Kubernetes on the host operating system. Default Kubernetes assets and manifests are included in this repo, but it is recommended to render or supply your own assets for real-world deployments.
Promenade is a tool for deploying self-hosted, highly resilient Kubernetes clusters.

## Quickstart using Vagrant

Make sure you have [Vagrant](https://vagrantup.com) and
[VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Then
install the `vagrant-hostmanager` plugin.
[VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed.

```bash
vagrant plugin install vagrant-hostmanager
```

Build the genesis and join images and save them to disk for quick loading into
the Vagrant VMs.

```bash
make save
```

Start the VMs and save a snapshot for quicker iteration:
Start the VMs:

```bash
vagrant up
vagrant snapshot save clean
```

Spin up a cluster:
Start the genesis node:

```bash
./test-install.sh
vagrant ssh n0 -c 'sudo /vagrant/genesis.sh /vagrant/example/vagrant-config.yaml'
```

Watch nodes spin up:
Join the master nodes:

```bash
watch kubectl --insecure-skip-tls-verify --kubeconfig <(sed 's/kubernetes:443/192.168.77.10:443/' < assets/kubeconfig) get nodes
vagrant ssh n1 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
vagrant ssh n2 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
```

To test changes, you can safely reset single or multiple nodes:
Join the worker node:

```bash
vagrant snapshot restore n2 clean --no-provision
vagrant snapshot restore clean --no-provision
vagrant ssh n3 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
```
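After the three joins, a quick way to confirm all four nodes registered is the `kubectl` one-liner carried over from the previous revision of this README (the kubeconfig path is relative to the repository root; 192.168.77.10 is the genesis node's private IP):

```bash
# Rewrite the in-cluster server name to n0's IP and watch nodes come Ready.
watch kubectl --insecure-skip-tls-verify \
    --kubeconfig <(sed 's/kubernetes:443/192.168.77.10:443/' < assets/kubeconfig) \
    get nodes
```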

## Detailed Deployment
## Building the image

The below steps can be used to deploy a cluster on bare metal or virtual nodes:

1. Overwrite the placeholder assets in the `assets` directory.

2. Make sure the `Makefile` lists the images and versions you expect to be
   required.

3. Build the images with `make build`.

4. Set up each host with the following:
   - DNS resolution pointing `kubernetes` to the appropriate IPs (Kubernetes master nodes) for the
     Kubernetes API
   - A running Docker daemon, configured to use the DNS resolution specified
     above (see `vagrant-assets/docker-daemon.json`)

5. Transfer the appropriate images to each host. You may find it useful to
   run `make save`, transfer the image and then use `docker load -i ...` to
   restore it rather than to rely on a registry.

6. On the genesis (seed) server, start the cluster, supplying the node's FQDN:
   `docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=genesis-node.fqdn quay.io/attcomdev/promenade-genesis:dev`

7. On each additional node to be joined to the cluster:
   `docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=join-node.fqdn quay.io/attcomdev/promenade-join:dev`

## References:

1. [Demo of Genesis Node Deployment](https://asciinema.org/a/c2fdtzh2z2fiymiyu75b32u0h)

2. [Demo of Joining Node to Cluster](https://asciinema.org/a/62dssvuiwbdanfuwwe6vzcihk)

```bash
docker build -t quay.io/attcomdev/promenade:experimental .
```
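A sketch of running the resulting image: the `/etc/promenade` and `/target` mount points come from the image's `VOLUME` declarations, but the config directory and the CLI invocation are assumptions, not something this commit prescribes:

```bash
docker run --rm \
    -v /:/target \
    -v "$(pwd)/example:/etc/promenade" \
    quay.io/attcomdev/promenade:experimental \
    promenade --help   # CLI entry point name is an assumption
```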
Vagrantfile (vendored): 32 lines changed
```ruby
@@ -5,37 +5,14 @@ Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial64"
  config.vm.box_check_update = false

  config.vm.provision :file, source: "vagrant-assets/docker-daemon.json", destination: "/tmp/docker-daemon.json"
  config.vm.provision :file, source: "vagrant-assets/dnsmasq-kubernetes", destination: "/tmp/dnsmasq-kubernetes"

  config.vm.provision :shell, privileged: true, inline:<<EOS
set -ex

echo === Installing packages ===
echo === Setting up NTP to simulate MaaS environment ===
apt-get update -qq
apt-get install -y -qq --no-install-recommends \
  docker.io \
  dnsmasq \
  gettext-base \

echo === Setting up DNSMasq ===
mv /tmp/dnsmasq-kubernetes /etc/dnsmasq.d/
chown root:root /etc/dnsmasq.d/dnsmasq-kubernetes
chmod 444 /etc/dnsmasq.d/dnsmasq-kubernetes
systemctl restart dnsmasq

echo === Reconfiguring Docker ===
mv /tmp/docker-daemon.json /etc/docker/daemon.json
chown root:root /etc/docker/daemon.json
chmod 444 /etc/docker/daemon.json
systemctl restart docker

echo === Done ===
apt-get install -y -qq --no-install-recommends chrony
EOS

  config.hostmanager.enabled = true
  config.hostmanager.manage_guest = true

  config.vm.provider "virtualbox" do |vb|
    vb.cpus = 2
    vb.memory = "2048"

@@ -56,4 +33,9 @@ EOS
    c.vm.network "private_network", ip: "192.168.77.12"
  end

  config.vm.define "n3" do |c|
    c.vm.hostname = "n3"
    c.vm.network "private_network", ip: "192.168.77.13"
  end

end
```
@@ -1,17 +0,0 @@
```yaml
---
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: https://kubernetes:443
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
users:
- name: kubelet
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lJTE1Qa0xkMkUvdUF3RFFZSktvWklodmNOQVFFTEJRQXdKVEVSTUE4R0ExVUUKQ2hNSVltOXZkR3QxWW1VeEVEQU9CZ05WQkFNVEIydDFZbVV0WTJFd0hoY05NVGN3TlRFNU1UZzBNVEl3V2hjTgpNVGd3TlRFNU1UZzBNVEl4V2pBck1SY3dGUVlEVlFRS0V3NXplWE4wWlcwNmJXRnpkR1Z5Y3pFUU1BNEdBMVVFCkF4TUhhM1ZpWld4bGREQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUx0ejltSG8KdFBraWRQYlFldTlSUzZ0QU9RaEFoUE96Vjd5NWt4bzlaa3lHUjVtT0o1TUVsZm9vZkhXR1hEcUpzM0lITzZacgpaVEtUWWdYNmMzamlzTWhJVDYySm5OOVphQVRXY3JkK3FRMTVpeFROaHFkeTNVY1g2eGxCOFlGOEtwVlo0MHJPCndyUC9Vc0c5RWFCaXQzN2lPbW1JTklrWnRiTklodk9ZaGtKdnIrTk90WC84VHNuUlpwVDlQeUNleVpKYnNaSVoKZDFBcGZ1MkVOZVMxQzFPZ09RSUVPUkVCZWhjM0dWSDExRDlCUnRGb2IyMk1qWlVqeHlHajBTQnV0VW1wdm5ZOQpvZ2ZFNXBUMHloSStrWmxQNmlNUGtrMG9HbGtjYytVNFg4VnJTeVlYZkpORWJtSTVhRFplM0E0bGs0ZlhpRi9ZCk5vc2JIWW56ZGYvajBhY0NBd0VBQWFNeE1DOHdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUcKQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQUlnYXhPNmFBeUdScQpNSU5QSUQ1YkcvWlNSb0lCU0VYMGJBdmlMS1dQOVJvbmpmYXlNOFhiM3IyV1o0VG1Kb1lZRE5NUkZveUNlU3R3CjFmamw3YjJ2cG1GQk94bHBtUnZOaFJGMWRsSTlSdDRHUlJWa3hlUzdjNGRrYzBMRlRIRVBwMFgvUm1TdDR1ZisKWDlzWXNXT0dTQmY1MitxWi83VU5JNlNZd29sdGVuemJ3bkxIWTlOU0xYaVZGb21tQ1hQYUJtYTFHbGtRTjJGMwpjRUluaGY3OEJYS1hlSXBXZFpib0h1V09VdTNhb1JUMHA2ZmVnYjJVeGgyYTczczZzVG9IakU3b3kzSDJadktSCmtjRkoyVG5LTXJxekVLLzl3eWMvZ3Uva1lWeDgvekNvUGxEUUFTZW03YVRaZ09JRFo4d2M0ZzlyQml0bnhkSXMKanhad2pPS3Q5Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBdTNQMlllaTArU0owOXRCNjcxRkxxMEE1Q0VDRTg3Tlh2TG1UR2oxbVRJWkhtWTRuCmt3U1YraWg4ZFlaY09vbXpjZ2M3cG10bE1wTmlCZnB6ZU9Ld3lFaFByWW1jMzFsb0JOWnl0MzZwRFhtTEZNMkcKcDNMZFJ4ZnJHVUh4Z1h3cWxWbmpTczdDcy85U3diMFJvR0szZnVJNmFZZzBpUm0xczBpRzg1aUdRbSt2NDA2MQpmL3hPeWRGbWxQMC9JSjdKa2x1eGtobDNVQ2wrN1lRMTVMVUxVNkE1QWdRNUVRRjZGemNaVWZYVVAwRkcwV2h2CmJZeU5sU1BISWFQUklHNjFTYW0rZGoyaUI4VG1sUFRLRWo2Um1VL3FJdytTVFNnYVdSeHo1VGhmeFd0TEpoZDgKazBSdVlqbG9ObDdjRGlXVGg5ZUlYOWcyaXhzZGlmTjEvK1BScHdJREFRQUJBb0lCQVFDUnB6SmJzNERqVUhYSAp6Z2luNmVnOUFhTVBHV3IxSFhaZ0MyWVU3bjZObVkwSzhOMHBMRmdJeitxZE96Qnd2OHh5SHRLbnBpMDAxalpGClpPelNrbnBBdFlkTDFYRFNUMXMyM3hhMkk3SGg2WDQ3Uk5PTFN3SkxHbmV2NFlCeFYzU1RKZ3dwZFd6dWhjYmQKQ1Rjb0EyeUhKK3V4VW9kWHZHVm1FRVhrQTdEVzd6TFpwdkxKLy9uRDV6NUNNMElVUGRhU2dYaFlRcDJOWld0SQpSakxkamt1WVZ5QllDMnJVNExwbWlIMWVJVkw3YkRIb1VRaE9hSE4wd1NGRzgwbzQ2Z3ZycWJock1QdzdCd0l1CmJDVzMwcTRZNEpQUlluNXJ1MHpDRm9ybmU2NUkya1J0bkpVRGpuOTlkT250V1ZaaWJSb2pZMGhGRkV5R1lPaloKV0l0ekdBYnhBb0dCQU5GajJaSGl0UXh0cVlzN01OSVk5anovN3B6dVBhWDhkbSsyLzNXVzVBb3QwMStzNHlWSApwZDdIRThsNU5qbmVqV0c3bkcyR1BzSWhiQ0NWWEV0U01HdDFCUmlvS3BjMmRMcStaUWI3NUxHRE1hSnpNV0VtCi9IaW1KdWhYdnhPenpLQzlaMjl2bzRkNkpDNTh2UHd5dTI3ZEZBdjNyekFjZGlXYi9haWI3UzZaQW9HQkFPVXUKQmVQWmdxbHB3bDN3cURsQWxqaUxzSDhBZVpVSDJyREE0bjRkKzFrS1BNcUpZTW1mdEdhVGtESk1lSmZpc2ZLYgpFWGNRc0dKQWVPTEhScFkxVnZrSHFuNXYrN3FnOUpIU25sdysvblRGNVZrNklTQUZNczJRZndkcTZmWjg5OEdaCm1pOVZYcjBoZXo3Wi92L2xpQ3hCY2wwaGdBaG5qSUZHdlE1clNtby9Bb0dCQUl2bFZGV2R6Q3lUai9VUUJOdzYKQlRwWUhBb0pPbk1OcSt1VHJqWFlMRitJb25LSHhmTUFYWmZzRmhKRHc3RUNPaCtVQXoxQnRlaHFBQjM4N0g3KwpXSTlTemFiZHBDY0hJUklyWnNBMXgyTzZMWTFGdlRZVm9CVFRuYWNhQ1BXVzZSNXpyUW5NNHNyL0ZmRmhNYnFtCkFvaGRlS2xPUUdPNmdFMDhYVXNyY2xueEFvR0JBTE92K2Y1RHRDYVFQVXphTzR0b0VHQVZaalN0Y3FaZW1pQ3IKbXVtM0tETVB5OG9oSERuNWRjQlhRbCt0aFgvUXhpU3BZSEF5TFpsYlkyeXJRYlRUN1hVamhaSE15MW53aU5FcwppZTFabHJpSDBPSzhxT3dxSjBMMVlDTzR0K2dDNDE1dnlHd0VTMXVPdk1yeXNQU0NTdG9vRmpyZTRUdTF0SHhICnNrTno2OHlSQW9HQkFKeU1Gb1F1MHJ6T3hDd1F4KzhtMWVuY205cGNVdnUyZVN3d3krOTQ2MFc0NzRXdzRxWkEKRjREV3dqRGc1ZEJHMUltMjFLSUpraG9YNTc5ZGgzUUlScjNQUndsUVVrUWx4VHJVU0V0cGZOVFUzcHZXVjlCRgp0dUxTMVRuT2R3ZW9ROGNHWlpkOVBXTUxMckJkMEplUjRGeUgyM3JPVW1NRndKMkE2T29wZVg2QgotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
contexts:
- context:
    cluster: local
    user: kubelet
```
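To inspect the example CA embedded in a kubeconfig like the one above, the PEM can be decoded with standard tooling (the file name `kubeconfig` below is a placeholder for wherever the file lives):

```bash
# Extract, decode, and print the CA certificate's subject and validity window.
grep certificate-authority-data kubeconfig \
    | awk '{print $2}' \
    | base64 -d \
    | openssl x509 -noout -subject -issuer -dates
```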
@@ -1,60 +0,0 @@
```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: bootstrap-kube-apiserver
  namespace: kube-system
spec:
  containers:
  - name: kube-apiserver
    image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
    command:
    - /usr/bin/flock
    - --exclusive
    - --timeout=30
    - /var/lock/api-server.lock
    - /hyperkube
    - apiserver
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
    - --advertise-address=$(POD_IP)
    - --allow-privileged=true
    - --authorization-mode=RBAC
    - --bind-address=0.0.0.0
    - --client-ca-file=/etc/kubernetes/secrets/ca.crt
    - --etcd-servers=http://10.3.0.15:2379,http://127.0.0.1:12379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
    - --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
    - --secure-port=443
    - --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
    - --service-cluster-ip-range=10.3.0.0/24
    - --storage-backend=etcd3
    - --tls-ca-file=/etc/kubernetes/secrets/ca.crt
    - --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
    - mountPath: /etc/kubernetes/secrets
      name: secrets
      readOnly: true
    - mountPath: /var/lock
      name: var-lock
      readOnly: false
  hostNetwork: true
  volumes:
  - name: secrets
    hostPath:
      path: /etc/kubernetes/bootstrap-secrets
  - name: ssl-certs-host
    hostPath:
      path: /usr/share/ca-certificates
  - name: var-lock
    hostPath:
      path: /var/lock
```
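Bootstrap pods like this one are meant to be run directly by the kubelet as static pods before any API server exists. A minimal sketch, assuming the kubelet's default `--pod-manifest-path` (an assumption; bootkube wires this up itself):

```bash
# Drop the manifest where the kubelet watches for static pods; the kubelet
# starts it without needing a running API server.
sudo cp bootstrap-kube-apiserver.yaml /etc/kubernetes/manifests/
docker ps --filter name=kube-apiserver   # confirm the container came up
```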
@@ -1,35 +0,0 @@
```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: bootstrap-kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - name: kube-controller-manager
    image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
    command:
    - ./hyperkube
    - controller-manager
    - --allocate-node-cidrs=true
    - --cluster-cidr=10.2.0.0/16
    - --configure-cloud-routes=false
    - --kubeconfig=/etc/kubernetes/kubeconfig
    - --leader-elect=true
    - --root-ca-file=/etc/kubernetes/bootstrap-secrets/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/bootstrap-secrets/service-account.key
    volumeMounts:
    - name: kubernetes
      mountPath: /etc/kubernetes
      readOnly: true
    - name: ssl-host
      mountPath: /etc/ssl/certs
      readOnly: true
  hostNetwork: true
  volumes:
  - name: kubernetes
    hostPath:
      path: /etc/kubernetes
  - name: ssl-host
    hostPath:
      path: /usr/share/ca-certificates
```
@@ -1,30 +0,0 @@
```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: bootstrap-etcd
  namespace: kube-system
  labels:
    k8s-app: boot-etcd
spec:
  containers:
  - name: etcd
    image: quay.io/coreos/etcd:v3.1.6
    command:
    - /usr/local/bin/etcd
    - --name=boot-etcd
    - --listen-client-urls=http://0.0.0.0:12379
    - --listen-peer-urls=http://0.0.0.0:12380
    - --advertise-client-urls=http://$(MY_POD_IP):12379
    - --initial-advertise-peer-urls=http://$(MY_POD_IP):12380
    - --initial-cluster=boot-etcd=http://$(MY_POD_IP):12380
    - --initial-cluster-token=bootkube
    - --initial-cluster-state=new
    - --data-dir=/var/etcd/data
    env:
    - name: MY_POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
  hostNetwork: true
  restartPolicy: Never
```
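Since this bootstrap member listens on the non-standard client port 12379 (per the manifest above), it can be probed with `etcdctl`, which is available in the same `etcd:v3.1.6` image if it is not installed on the host:

```bash
# Check the bootstrap etcd member's health and membership.
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:12379 endpoint health
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:12379 member list
```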
@@ -1,24 +0,0 @@
```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: bootstrap-kube-scheduler
  namespace: kube-system
spec:
  containers:
  - name: kube-scheduler
    image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
    command:
    - ./hyperkube
    - scheduler
    - --kubeconfig=/etc/kubernetes/kubeconfig
    - --leader-elect=true
    volumeMounts:
    - name: kubernetes
      mountPath: /etc/kubernetes
      readOnly: true
  hostNetwork: true
  volumes:
  - name: kubernetes
    hostPath:
      path: /etc/kubernetes
```
@@ -1 +0,0 @@
```
auth/kubeconfig
```
@@ -1,31 +0,0 @@
```yaml
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: etcd-operator
  namespace: kube-system
  labels:
    k8s-app: etcd-operator
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: etcd-operator
    spec:
      containers:
      - name: etcd-operator
        image: quay.io/coreos/etcd-operator:v0.2.5
        env:
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
```
@@ -1,15 +0,0 @@
```yaml
---
apiVersion: v1
kind: Service
metadata:
  name: etcd-service
  namespace: kube-system
spec:
  selector:
    app: etcd
    etcd_cluster: kube-etcd
  clusterIP: 10.3.0.15
  ports:
  - name: client
    port: 2379
    protocol: TCP
```
@@ -1,12 +0,0 @@
```yaml
---
apiVersion: v1
data:
  apiserver.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURoRENDQW15Z0F3SUJBZ0lJWVJUbkVVV1BCMkV3RFFZSktvWklodmNOQVFFTEJRQXdKVEVSTUE4R0ExVUUKQ2hNSVltOXZkR3QxWW1VeEVEQU9CZ05WQkFNVEIydDFZbVV0WTJFd0hoY05NVGN3TlRFNU1UZzBNVEl3V2hjTgpNVGd3TlRFNU1UZzBNVEl4V2pBdk1SUXdFZ1lEVlFRS0V3dHJkV0psTFcxaGMzUmxjakVYTUJVR0ExVUVBeE1PCmEzVmlaUzFoY0dselpYSjJaWEl3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRREIKc1hEUUd0NENTa20rSDBvVDNIZ3pBRHpLM0lRdGM1UVZLVGIyRFR5dzIvbStoNE1SZDZuK2xyYThwdG8wOUlzLwpZaVZ4OE9DQ0ZGc083MjZaWnFMUWxRZVBERjM2UUtKYnBJeUdxMmIzR1ZCeURRcXRuNDd4aFhVZUx1MHo3SU1LCjg5MDZ4bVpYZWc4SEhUSVM5UDY2ejN4QTlrTG4wbndTU0ZKSEdUWE1vRnI4Y25MeVNucnRESGU5cEdvLytqY1IKMCtqaUgzYXQzdzJGMXRDYVRaOHpuRU1SUDgwQlR5c2I3SWxaZG1OQmZhU29UNDVOamUyZUJwWkRkeHZJOHFoaQpKMlpXWjd2UXN1NkFsQ25lS3BUajR0Z3NWNnNFQWdzMlY4cGFiUmFTTTV0MEhxMWxHby9ucGNPYW1JVVFBcTF1Ck8yU3BTVElvamRTSG1XZEQ5aDVkQWdNQkFBR2pnYTB3Z2Fvd0RnWURWUjBQQVFIL0JBUURBZ1dnTUIwR0ExVWQKSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakI1QmdOVkhSRUVjakJ3Z2dwcmRXSmxjbTVsZEdWegpnZ3ByZFdKbGNtNWxkR1Z6Z2hKcmRXSmxjbTVsZEdWekxtUmxabUYxYkhTQ0ZtdDFZbVZ5Ym1WMFpYTXVaR1ZtCllYVnNkQzV6ZG1PQ0pHdDFZbVZ5Ym1WMFpYTXVaR1ZtWVhWc2RDNXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJY0UKQ2dNQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFqOEc5TGM3NVFOYmhBRFF1T1hzU0dFaTZiSjBVZEZvVgp2djVWTE5NT2RaMCtqWHRIdExZckIzUnlnSWNvbFNkcWxhcXBNOW5qNjF4Z25oRzNPSVlJdzhCQ3FRbGFCZ08rCjVjQXZ6bXFsMjlBb0RiTGV1M0pjdG15U1NjcXlDajRtcXRsT0dIZ0lvdFVxMjI2UmUxYXFTSjh6TEg3VURWRWEKanlRbzh2bjVHUW0vWHd5R1V0NG5TcFlYTWk2TXp0ZWJjZVBkeU9lNDM4N05GSlM5L09VUUlkV2xodjFjZWdLKwpmVThLUnYyTWlCZlpacUoxRFFEMTdlVjk0OTRESW1HTjFuQ3BWbG1QTkJHVENlNzVTT1lDQk93WWhIS29OTUxuClltdG5wekJ0ZkFrVTRFemppTW02VjIyWEkvbFpzUWR4ZVFmTU1TY21oK002MERIcjdUb1JkZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  apiserver.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBd2JGdzBCcmVBa3BKdmg5S0U5eDRNd0E4eXR5RUxYT1VGU2syOWcwOHNOdjV2b2VECkVYZXAvcGEydktiYU5QU0xQMklsY2ZEZ2doUmJEdTl1bVdhaTBKVUhqd3hkK2tDaVc2U01ocXRtOXhsUWNnMEsKclorTzhZVjFIaTd0TSt5REN2UGRPc1ptVjNvUEJ4MHlFdlQrdXM5OFFQWkM1OUo4RWtoU1J4azF6S0JhL0hKeQo4a3A2N1F4M3ZhUnFQL28zRWRQbzRoOTJyZDhOaGRiUW1rMmZNNXhERVQvTkFVOHJHK3lKV1daalFYMmtxRStPClRZM3RuZ2FXUTNjYnlQS29ZaWRtVm1lNzBMTHVnSlFwM2lxVTQrTFlMRmVyQkFJTE5sZktXbTBXa2pPYmRCNnQKWlJxUDU2WERtcGlGRUFLdGJqdGtxVWt5S0kzVWg1bG5RL1llWFFJREFRQUJBb0lCQUVSTjFaR2RsK0xJM2I1cwovRXVLdU55TFhlVFA1TkMrYkY4Vi9LckNPai9JSXdjY2RJMEpYT3BKcmNGVE9hbm8vdDNvTjNvNXpvSXd1WGZICjJZSEJIdk5kU3FBWVpWK2x3VnQ5Nkl4cEQxTmVHdTlOU0JHNExjbGdIYy82RG0zOEhxNFRGMVh0dHhOc0dMYVMKaGlFSFFua1FTQ29FYmMyZ2ZWNVpJS0t2OGpmcFNoWWlhQVB6cnQzc2FFLzIrT2xpSjVwNnpmWEtObEVzZzFVUwo3OGcrSmlPVlhaZEVRRnlQUDVZbzhnamU4d1EyTmV0bmlsUVE5cnRCYlB2OUZmc1RyajAzc3JsVTJEN0lJQmRRCjdEM1o1QU43ZTdSaXdSR21TdFo0R2xsY0N1aHZqaHZmaGF2MTMyRzAxbzgvRHd2VkxUbmZTS0ZBNytFOFVZRzkKNlpBelg0VUNnWUVBL3BYdDhlaGozczFmOGNOYVNFSmxEOEFzT0hnemN1eFJ2ZHJFK3pBOGw0ZUVGcFA1VUplagpPY0R1OTY2cTF5dDRRcDdZeDJzVzNVQTc2bTdSdWdkcUE1TVAyNWZnekdWNW4yMml3WWJuQlF2cURRRU9qTUgxCjFrMENrYVJYaERDekd1d2IwN29nL3JoT0pkQ0kzT1NDUXBMRDZCc1g4TVZQSi8yR2ZlNFhFQ2NDZ1lFQXdzVG8KL2lOWkZOS2tQd2xmanBpcnJ5NmdCN0daWVJZZGtuZU1NOTJmVHp1RGRxU0lyTTlvTEJlVXlpeEFmUFA5YzB5VgoyY3doYzhUTGRIeEl3YXR6Tk5Ka3dwMitlQU5mZzhqUTB2SzlKOFYwNjQ5QzVpTTNhWjVNVVZHMklTNFJBWnRICk1HMnc1ZnZkZDdTcUo4Uk9XVXk3K0UwczQ3MnlmSk5MM2F1TmE5c0NnWUVBNUFYUHdFc0FJSS9jYm9NbGV2RVUKNloyYlBkelRZQXl3VGhjRE5XU0g4TVN0Rnpma0p6NGFNV0ZQNkVIbXZLQXZyNlBzei9objJ6VnNOTmFiUEQ3bAp3bHZXNlQxSVdHcFBHKytyeGlDWkRKa1dRaDEvTmEySURqQ2RxMnNDQStGR21rZDl5UTY5L01lQkh6ZC9UakhSClJlV0VXSURqMllBd0hNWmp6cWtRdVNNQ2dZQTEwS3AvN2N4alVJQkpXcEdvbU01M0xPN1NzV09yeTZ5SUY3Z0oKYktia0FaR2xhbmpKSnRXbHVTNUhYa3JETzdjLzhGMUhQSHZSdlFKcVFSenBSaklpMmk4MUJ0amwyQ2pBQlBDTwpHTHZqRFUvczlqeUowaGt4ZWFla29Hc3VaOGdUSlpCWjlUVDNsc3Z1azJDZ2RFRWhzMjRNZ1daeDFxeEdkM3h5CjF6L1FHUUtCZ1FDRTdhZlp3SUVVUTZlcEdsVVB3bW0rbXFHaVVOYlhBei9QbksvSWh1T2VWOWFFVTc4bFBIOHAKNnJNcHVoNVNPcUdjUmFaaHBSU0lZM3ZxVTlZazQ5T080Qk9hd0YyajhrNHBWa1RhSkdnRDcxaW44YURiY1ZCYwpWbElNUDJxOTNtbnlPN09DOHpuUUtITXM1V1JXRW9rUmJTc2pXRWVRRjFNdHlCV2FJaVdtbGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  service-account.pub: LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUExT0pRbUU5SkNJMjBoM0JJL3hKcApRb05JZll2aUhJaGx4NkFsNjBLdjRaYit0YUQrSmQ2cENiSHFqZ1l5aVlIMXdxMG5NQzlNaVJicGhkTXNLZkpYCm81N0gyWDFRV05jKzNSWXpORUwycmEycmtDR3dxMWpLR2s2Um9mYWdicmluakFDOWhHY20vVjcxM2ZDZFNwVUwKSDZSdXJvOUtqdnRjYTBuTGpCY0dDMDNwa3VVaTFlN0VQajJTQUxReEExaVYyK3NxcXBnMmF4bHB5QU43Z2VjYQpmalZOMTBra013OUdLdW1RcVVwZWpDdGYzdFR2enpmbUdxaU5uSERCOGxEblhwSGVjS0laa2ZYZEg1UGQ0alJZCjVEeUZmcnNMNXh5ME9IRjRyQS9FRFNGa2RFWjJyVFlpQ0IvTzE3cHc2THVFdTc5VjNOMmhKVkV3ZTRVdGkzb2wKUXdJREFRQUIKLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==
kind: Secret
metadata:
  name: kube-apiserver
  namespace: kube-system
type: Opaque
```
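Rather than hand-encoding base64 as above, a secret of this shape can be regenerated from PKI files on disk (the local file names are assumptions):

```bash
# Build the same four-key secret from files; kubectl handles the encoding.
kubectl -n kube-system create secret generic kube-apiserver \
    --from-file=apiserver.crt \
    --from-file=apiserver.key \
    --from-file=ca.crt \
    --from-file=service-account.pub
```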
@@ -1,82 +0,0 @@
```yaml
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kube-apiserver
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-apiserver
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        component: kube-apiserver
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: kube-apiserver
        image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
        command:
        - /usr/bin/flock
        - --exclusive
        - --timeout=30
        - /var/lock/api-server.lock
        - /hyperkube
        - apiserver
        - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
        - --advertise-address=$(POD_IP)
        - --allow-privileged=true
        - --anonymous-auth=false
        - --authorization-mode=RBAC
        - --bind-address=0.0.0.0
        - --client-ca-file=/etc/kubernetes/secrets/ca.crt
        - --cloud-provider=
        - --etcd-servers=http://10.3.0.15:2379
        - --insecure-port=0
        - --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
        - --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
        - --secure-port=443
        - --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
        - --service-cluster-ip-range=10.3.0.0/24
        - --storage-backend=etcd3
        - --tls-ca-file=/etc/kubernetes/secrets/ca.crt
        - --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
        - --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
          readOnly: true
        - mountPath: /etc/kubernetes/secrets
          name: secrets
          readOnly: true
        - mountPath: /var/lock
          name: var-lock
          readOnly: false
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: ssl-certs-host
        hostPath:
          path: /usr/share/ca-certificates
      - name: secrets
        secret:
          secretName: kube-apiserver
      - name: var-lock
        hostPath:
          path: /var/lock
```
@@ -1,12 +0,0 @@
```yaml
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      tier: control-plane
      component: kube-controller-manager
```
@@ -1,10 +0,0 @@
```yaml
---
apiVersion: v1
data:
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  service-account.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMU9KUW1FOUpDSTIwaDNCSS94SnBRb05JZll2aUhJaGx4NkFsNjBLdjRaYit0YUQrCkpkNnBDYkhxamdZeWlZSDF3cTBuTUM5TWlSYnBoZE1zS2ZKWG81N0gyWDFRV05jKzNSWXpORUwycmEycmtDR3cKcTFqS0drNlJvZmFnYnJpbmpBQzloR2NtL1Y3MTNmQ2RTcFVMSDZSdXJvOUtqdnRjYTBuTGpCY0dDMDNwa3VVaQoxZTdFUGoyU0FMUXhBMWlWMitzcXFwZzJheGxweUFON2dlY2FmalZOMTBra013OUdLdW1RcVVwZWpDdGYzdFR2Cnp6Zm1HcWlObkhEQjhsRG5YcEhlY0tJWmtmWGRINVBkNGpSWTVEeUZmcnNMNXh5ME9IRjRyQS9FRFNGa2RFWjIKclRZaUNCL08xN3B3Nkx1RXU3OVYzTjJoSlZFd2U0VXRpM29sUXdJREFRQUJBb0lCQUhTV2pYVWMxdTZzVE5adwpGRW85bHhBcVBpVWoydTJ0ZGJCaWNPSHJYOCs0bGo1NnNUV2tRQWRqUFFZVE50SkFMb3d6c0dhZlFOZERpUmtWCmtmWlhGdEF4UVZwSFd4Mk1wSTBJZjNwN3dnVlVPOFZ2N2dXcFZ1WVphWUMrUlJiZVlrUTJrNVJUdWZMQmN2M2QKclFjUG9VdnZEZjdqMHYyRGhCWHVFRi9rckJhNzBPbkk2RnY1YjZUYXk0Y042dm1OSlNQVWxEUHZpY0Npem12VgpXdEFxNXBrUGZYVzF1d2VNWURPU0QxMHphZXRjbE1hZS8wQzFoYWhrOWtHb0x2NDlYbktDWC9MdXp3eDBTaEpMCkYwWmsrMHM5bm1NQUFmUkw4Sk03RTlpd1hhOEk0elhwYU5PTjVSZnpkVVFlVTZwdWhOUXJNRXhyZnpGWVdZVmwKclBhUm5xRUNnWUVBNEM3aTlCMDh3UitKRWJLZUV2VFR6VUFTOFcrUzVsU2t6UE43NVR0NGFIZVRvanp2UlhhMApuVXZiciswUEdjdHBhM093RHpoL1NheUtxa0p2V3p4V216S0VMVHNXa3BVWkx5eDM3b3hrb1ErZFVLU0ZEWUY3CmVqR1lmcXRoVUM2NU5BMHJxbXo2cWlDSy9SRlhMMWloTVkwZi83NCtJekNob2lmdHBGUTBwdDhDZ1lFQTh4am4KakhjQnBHbVVPeUtSV2ttVE0xeDNsNU5oVDJiWll5NUNHUFhaOHRpdTZ6ZGkyZ3cyeFVtZ1ZJUHpVblREcW1PSApOUHVSdkh2MnNvdnFac0FwRGFua3d6c1d0aEZMVkZqUGRwWGpWYStHdnA2WU4wRlRlZUlFakd1am1DSjlaajliCm9JazRvNmdSelFOeDVML1JhRTIvb1FyVEd3bENXZUE0NHBINmdoMENnWUVBMEtaU3pPazVWblZIV1pWbzBqUFQKdlVCWllTUjdFS3pQQllISVdqM1RmMGRydktBQ0FpRE5VV2o4K3V3a0ZkbmdNQVhvWXdJdVZoK2tuM3Bkc2dpaQpncWV0cFh0Tk12aGFEREhUSGM3RkNiSkN0SCtxNWpzUTlWV2JuS2xkVlFkbmtDNkI2WWlzZEJMOXlUT09kWjZECnlGNlUzYTN1bjBudjVjQkx5Wm9sdHZrQ2dZRUE1QWV4YzZaU0tRcE1YR2dobG1LN3JJc0pOMnFzOWhGUXkyTWgKNTAzK29uaTFJN2p4aGYyOUJyVDRxeTZXK1ByRWE3a3VvL2x6REMzd0RDMklzOWQrNnUwNXhCUlNTbmpRZzQ5SApGRUtuVzhIcGtEY3VLMjZnd2d6TUhYZituZitFUjN3WkUrNkQ3YWdEQXA4L244WjZ4TzloV012Um1HUElGSXhxCmI4VmxDZFVDZ1lCZ3dmVXNTc0NNUDhLVk9KQXV3ZjQvU1dPa0lVVVFIUVVqMUN5RXoyVVdHNVFpUDJ3cUZpQTcKSUg4SzhKc085TVNXcTNuZFI5a1IrSEdCQ2tKeXlvRDFHekJaZVJoUGIrNjlmWVdhbzNsS1V6RURxbXhCN3pqaApOUGx0YkxsR0dOYlBoY3pYeUplU3YxTjk0TVV3WTF3dDBhQVg2RytIaUJJOGEzY2pDL2NRUGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
kind: Secret
metadata:
  name: kube-controller-manager
  namespace: kube-system
type: Opaque
```
@@ -1,77 +0,0 @@
```yaml
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-controller-manager
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-controller-manager
spec:
  replicas: 2
  template:
    metadata:
      labels:
        tier: control-plane
        component: kube-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: tier
                  operator: In
                  values:
                  - control-plane
                - key: component
                  operator: In
                  values:
                  - kube-contoller-manager
              topologyKey: kubernetes.io/hostname
      containers:
      - name: kube-controller-manager
        image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
        command:
        - ./hyperkube
        - controller-manager
        - --allocate-node-cidrs=true
        - --cloud-provider=
        - --cluster-cidr=10.2.0.0/16
        - --configure-cloud-routes=false
        - --leader-elect=true
        - --root-ca-file=/etc/kubernetes/secrets/ca.crt
        - --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10252  # Note: Using default port. Update if --port option is set differently.
          initialDelaySeconds: 15
          timeoutSeconds: 15
        volumeMounts:
        - name: secrets
          mountPath: /etc/kubernetes/secrets
          readOnly: true
        - name: ssl-host
          mountPath: /etc/ssl/certs
          readOnly: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: secrets
        secret:
          secretName: kube-controller-manager
      - name: ssl-host
        hostPath:
          path: /usr/share/ca-certificates
      dnsPolicy: Default  # Don't use cluster DNS.
```
@@ -1,21 +0,0 @@
```yaml
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.3.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
```
@@ -1,49 +0,0 @@
```yaml
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kube-etcd-network-checkpointer
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-etcd-network-checkpointer
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        component: kube-etcd-network-checkpointer
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
    spec:
      containers:
      - image: quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035
        name: kube-etcd-network-checkpointer
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/kubernetes/selfhosted-etcd
          name: checkpoint-dir
          readOnly: false
        - mountPath: /var/lock
          name: var-lock
          readOnly: false
        command:
        - /usr/bin/flock
        - /var/lock/kenc.lock
        - -c
        - "kenc -r -m iptables && kenc -m iptables"
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: checkpoint-dir
        hostPath:
          path: /etc/kubernetes/checkpoint-iptables
      - name: var-lock
        hostPath:
          path: /var/lock
```
@@ -1,39 +0,0 @@
```yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: canal-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "http://10.3.0.136:6666"

  # The interface used by Canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
  canal_iface: ""

  # Whether or not to masquerade traffic to destinations not within
  # the pod network.
  masquerade: "true"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
        "name": "canal",
        "type": "flannel",
        "delegate": {
            "type": "calico",
            "etcd_endpoints": "__ETCD_ENDPOINTS__",
            "log_level": "info",
            "policy": {
                "type": "k8s",
                "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
            },
            "kubernetes": {
                "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
            }
        }
    }
```
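The per-key lookups that the canal containers below perform via `configMapKeyRef` can be reproduced from the CLI, which is handy when debugging a cluster that still carries this ConfigMap:

```bash
# Read individual data keys straight out of the ConfigMap.
kubectl -n kube-system get configmap canal-config \
    -o jsonpath='{.data.etcd_endpoints}'
kubectl -n kube-system get configmap canal-config \
    -o jsonpath='{.data.cni_network_config}'
```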
@@ -1,368 +0,0 @@
```yaml
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: canal-etcd
  namespace: kube-system
  labels:
    k8s-app: canal-etcd
spec:
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
      labels:
        k8s-app: canal-etcd
    spec:
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: canal-etcd
        image: quay.io/coreos/etcd:v3.1.4
        env:
        - name: ETCD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        command: ["/bin/sh","-c"]
        args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
        volumeMounts:
        - name: var-etcd
          mountPath: /var/etcd
      volumes:
      - name: var-etcd
        hostPath:
          path: /var/etcd

---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: canal-etcd
  name: canal-etcd
  namespace: kube-system
spec:
  # Select the canal-etcd pod running on the master.
  selector:
    k8s-app: canal-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: 10.3.0.136
  ports:
  - port: 6666

---
# This manifest installs the per-node agents, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: canal-node
  namespace: kube-system
  labels:
    k8s-app: canal-node
spec:
  selector:
    matchLabels:
      k8s-app: canal-node
  template:
    metadata:
      labels:
        k8s-app: canal-node
    spec:
      hostNetwork: true
      serviceAccountName: calico-cni-plugin
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      # Runs the flannel daemon to enable vxlan networking between
      # container hosts.
      - name: flannel
        image: quay.io/coreos/flannel:v0.7.1
        env:
        # The location of the etcd cluster.
        - name: FLANNELD_ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: etcd_endpoints
        # The interface flannel should run on.
        - name: FLANNELD_IFACE
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: canal_iface
        # Perform masquerade on traffic leaving the pod cidr.
        - name: FLANNELD_IP_MASQ
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: masquerade
        # Write the subnet.env file to the mounted directory.
        - name: FLANNELD_SUBNET_FILE
          value: "/run/flannel/subnet.env"
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/resolv.conf
          name: resolv
        - mountPath: /run/flannel
          name: run-flannel
      # Runs calico/node container on each Kubernetes node. This
      # container programs network policy and local routes on each
      # host.
      - name: calico-node
        image: quay.io/calico/node:v1.1.3
        env:
        # The location of the etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: etcd_endpoints
        # Disable Calico BGP. Calico is simply enforcing policy.
        - name: CALICO_NETWORKING
          value: "false"
        # Disable file logging so `kubectl logs` works.
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
        # All pods to speak to services that resolve to the same host.
        - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
          value: "ACCEPT"
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 250m
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
      # This container installs the Calico CNI binaries
      # and CNI network config file on each node.
      - name: install-calico-cni
        image: quay.io/calico/cni:v1.7.0
        imagePullPolicy: Always
        command: ["/install-cni.sh"]
        env:
        # The name of the CNI network config file to install.
        - name: CNI_CONF_NAME
          value: "10-canal.conf"
        # The location of the etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: etcd_endpoints
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: cni_network_config
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
      volumes:
      # Used by calico/node.
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: var-run-calico
        hostPath:
          path: /var/run/calico
      # Used to install CNI.
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
      # Used by flannel daemon.
      - name: run-flannel
        hostPath:
          path: /run/flannel
      - name: resolv
        hostPath:
          path: /etc/resolv.conf

---
# This manifest deploys a Job which performs one time
# configuration of Canal.
apiVersion: batch/v1
kind: Job
metadata:
  name: configure-canal
  namespace: kube-system
  labels:
    k8s-app: canal
spec:
  template:
    metadata:
      name: configure-canal
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      hostNetwork: true
      restartPolicy: OnFailure
      containers:
      # Writes basic flannel configuration to etcd.
      - name: configure-flannel
        image: quay.io/coreos/etcd:v3.1.4
        command:
        - "etcdctl"
        - "--no-sync"
        - "set"
        - "/coreos.com/network/config"
        - '{ "Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"} }'
        env:
        # The location of the etcd cluster.
        - name: ETCDCTL_PEERS
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: etcd_endpoints

---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      serviceAccountName: calico-policy-controller
      containers:
      - name: calico-policy-controller
        image: quay.io/calico/kube-policy-controller:v0.5.4
        env:
        # The location of the Calico etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: canal-config
              key: etcd_endpoints
        # The location of the Kubernetes API. Use the default Kubernetes
        # service for API access.
        - name: K8S_API
          value: "https://kubernetes.default:443"
        # Since we're running in the host namespace and might not have KubeDNS
        # access, configure the container's /etc/hosts to resolve
        # kubernetes.default to the correct service clusterIP.
        - name: CONFIGURE_ETC_HOSTS
          value: "true"

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
  namespace: kube-system
rules:
- apiGroups: [""]
  resources:
  - pods
  - nodes
  verbs:
  - get

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-policy-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-policy-controller
subjects:
- kind: ServiceAccount
  name: calico-policy-controller
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-policy-controller
  namespace: kube-system
rules:
- apiGroups:
  - ""
  - extensions
  resources:
  - pods
  - namespaces
  - networkpolicies
  verbs:
  - watch
  - list

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-policy-controller
  namespace: kube-system
```
@ -1,56 +0,0 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    tier: node
    component: kube-proxy
spec:
  template:
    metadata:
      labels:
        tier: node
        component: kube-proxy
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: kube-proxy
        image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
        command:
        - /hyperkube
        - proxy
        - --cluster-cidr=10.2.0.0/16
        - --hostname-override=$(NODE_NAME)
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --proxy-mode=iptables
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
          readOnly: true
        - name: etc-kubernetes
          mountPath: /etc/kubernetes
          readOnly: true
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - hostPath:
          path: /usr/share/ca-certificates
        name: ssl-certs-host
      - name: etc-kubernetes
        hostPath:
          path: /etc/kubernetes
@ -1,12 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      tier: control-plane
      component: kube-scheduler
@ -1,56 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-scheduler
spec:
  replicas: 2
  template:
    metadata:
      labels:
        tier: control-plane
        component: kube-scheduler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: tier
                  operator: In
                  values:
                  - control-plane
                - key: component
                  operator: In
                  values:
                  - kube-scheduler
              topologyKey: kubernetes.io/hostname
      containers:
      - name: kube-scheduler
        image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
        command:
        - ./hyperkube
        - scheduler
        - --leader-elect=true
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251  # Note: Using default port. Update if --port option is set differently.
          initialDelaySeconds: 15
          timeoutSeconds: 15
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
@ -1,14 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1alpha1
metadata:
  name: system:default-sa
subjects:
  - kind: ServiceAccount
    name: default
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
@ -1,59 +0,0 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: pod-checkpointer
  namespace: kube-system
  labels:
    tier: control-plane
    component: pod-checkpointer
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        component: pod-checkpointer
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
    spec:
      containers:
      - name: checkpoint
        image: quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3
        command:
        - /checkpoint
        - --v=4
        - --lock-file=/var/run/lock/pod-checkpointer.lock
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: /etc/kubernetes
          name: etc-kubernetes
        - mountPath: /var/run
          name: var-run
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      restartPolicy: Always
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: etc-kubernetes
        hostPath:
          path: /etc/kubernetes
      - name: var-run
        hostPath:
          path: /var/run
@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDhDCCAmygAwIBAgIIYRTnEUWPB2EwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE
ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN
MTgwNTE5MTg0MTIxWjAvMRQwEgYDVQQKEwtrdWJlLW1hc3RlcjEXMBUGA1UEAxMO
a3ViZS1hcGlzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB
sXDQGt4CSkm+H0oT3HgzADzK3IQtc5QVKTb2DTyw2/m+h4MRd6n+lra8pto09Is/
YiVx8OCCFFsO726ZZqLQlQePDF36QKJbpIyGq2b3GVByDQqtn47xhXUeLu0z7IMK
8906xmZXeg8HHTIS9P66z3xA9kLn0nwSSFJHGTXMoFr8cnLySnrtDHe9pGo/+jcR
0+jiH3at3w2F1tCaTZ8znEMRP80BTysb7IlZdmNBfaSoT45Nje2eBpZDdxvI8qhi
J2ZWZ7vQsu6AlCneKpTj4tgsV6sEAgs2V8pabRaSM5t0Hq1lGo/npcOamIUQAq1u
O2SpSTIojdSHmWdD9h5dAgMBAAGjga0wgaowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB5BgNVHREEcjBwggprdWJlcm5ldGVz
ggprdWJlcm5ldGVzghJrdWJlcm5ldGVzLmRlZmF1bHSCFmt1YmVybmV0ZXMuZGVm
YXVsdC5zdmOCJGt1YmVybmV0ZXMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbIcE
CgMAATANBgkqhkiG9w0BAQsFAAOCAQEAj8G9Lc75QNbhADQuOXsSGEi6bJ0UdFoV
vv5VLNMOdZ0+jXtHtLYrB3RygIcolSdqlaqpM9nj61xgnhG3OIYIw8BCqQlaBgO+
5cAvzmql29AoDbLeu3JctmySScqyCj4mqtlOGHgIotUq226Re1aqSJ8zLH7UDVEa
jyQo8vn5GQm/XwyGUt4nSpYXMi6MztebcePdyOe4387NFJS9/OUQIdWlhv1cegK+
fU8KRv2MiBfZZqJ1DQD17eV9494DImGN1nCpVlmPNBGTCe75SOYCBOwYhHKoNMLn
YmtnpzBtfAkU4EzjiMm6V22XI/lZsQdxeQfMMScmh+M60DHr7ToRdg==
-----END CERTIFICATE-----
@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwbFw0BreAkpJvh9KE9x4MwA8ytyELXOUFSk29g08sNv5voeD
EXep/pa2vKbaNPSLP2IlcfDgghRbDu9umWai0JUHjwxd+kCiW6SMhqtm9xlQcg0K
rZ+O8YV1Hi7tM+yDCvPdOsZmV3oPBx0yEvT+us98QPZC59J8EkhSRxk1zKBa/HJy
8kp67Qx3vaRqP/o3EdPo4h92rd8NhdbQmk2fM5xDET/NAU8rG+yJWXZjQX2kqE+O
TY3tngaWQ3cbyPKoYidmVme70LLugJQp3iqU4+LYLFerBAILNlfKWm0WkjObdB6t
ZRqP56XDmpiFEAKtbjtkqUkyKI3Uh5lnQ/YeXQIDAQABAoIBAERN1ZGdl+LI3b5s
/EuKuNyLXeTP5NC+bF8V/KrCOj/IIwccdI0JXOpJrcFTOano/t3oN3o5zoIwuXfH
2YHBHvNdSqAYZV+lwVt96IxpD1NeGu9NSBG4LclgHc/6Dm38Hq4TF1XttxNsGLaS
hiEHQnkQSCoEbc2gfV5ZIKKv8jfpShYiaAPzrt3saE/2+OliJ5p6zfXKNlEsg1US
78g+JiOVXZdEQFyPP5Yo8gje8wQ2NetnilQQ9rtBbPv9FfsTrj03srlU2D7IIBdQ
7D3Z5AN7e7RiwRGmStZ4GllcCuhvjhvfhav132G01o8/DwvVLTnfSKFA7+E8UYG9
6ZAzX4UCgYEA/pXt8ehj3s1f8cNaSEJlD8AsOHgzcuxRvdrE+zA8l4eEFpP5UJej
OcDu966q1yt4Qp7Yx2sW3UA76m7RugdqA5MP25fgzGV5n22iwYbnBQvqDQEOjMH1
1k0CkaRXhDCzGuwb07og/rhOJdCI3OSCQpLD6BsX8MVPJ/2Gfe4XECcCgYEAwsTo
/iNZFNKkPwlfjpirry6gB7GZYRYdkneMM92fTzuDdqSIrM9oLBeUyixAfPP9c0yV
2cwhc8TLdHxIwatzNNJkwp2+eANfg8jQ0vK9J8V0649C5iM3aZ5MUVG2IS4RAZtH
MG2w5fvdd7SqJ8ROWUy7+E0s472yfJNL3auNa9sCgYEA5AXPwEsAII/cboMlevEU
6Z2bPdzTYAywThcDNWSH8MStFzfkJz4aMWFP6EHmvKAvr6Psz/hn2zVsNNabPD7l
wlvW6T1IWGpPG++rxiCZDJkWQh1/Na2IDjCdq2sCA+FGmkd9yQ69/MeBHzd/TjHR
ReWEWIDj2YAwHMZjzqkQuSMCgYA10Kp/7cxjUIBJWpGomM53LO7SsWOry6yIF7gJ
bKbkAZGlanjJJtWluS5HXkrDO7c/8F1HPHvRvQJqQRzpRjIi2i81Btjl2CjABPCO
GLvjDU/s9jyJ0hkxeaekoGsuZ8gTJZBZ9TT3lsvuk2CgdEEhs24MgWZx1qxGd3xy
1z/QGQKBgQCE7afZwIEUQ6epGlUPwmm+mqGiUNbXAz/PnK/IhuOeV9aEU78lPH8p
6rMpuh5SOqGcRaZhpRSIY3vqU9Yk49OO4BOawF2j8k4pVkTaJGgD71in8aDbcVBc
VlIMP2q93mnyO7OC8znQKHMs5WRWEokRbSsjWEeQF1MtyBWaIiWmlg==
-----END RSA PRIVATE KEY-----
@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC6DCCAdCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAlMREwDwYDVQQKEwhib290
a3ViZTEQMA4GA1UEAxMHa3ViZS1jYTAeFw0xNzA1MTkxODQxMjBaFw0yNzA1MTcx
ODQxMjBaMCUxETAPBgNVBAoTCGJvb3RrdWJlMRAwDgYDVQQDEwdrdWJlLWNhMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAulAVfzTe/mMl31NAx7P524sz
nQKmxG+BXfDPt4O778tBF76RsEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm
336ErFtqTwMoreY7WVCU2CBFOtt2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqq
m4hptSCepUjilmkReWQ+/N4+RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV
8JIj2RLZn85FTzMFTQk0Wu0Zugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjm
kn9238we+2wBeRaceCvC7jyDvYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwID
AQABoyMwITAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG
9w0BAQsFAAOCAQEADHvgtDCE8tv0lKIzEqfubUA5LKQ4NiT5SUAucYazMpKw1QIW
QinCoLEfyPMwgkbgXjzwne8PxeEjjvwCRqwbyViBWnv937p94poZ/9G3CW3bSYwQ
4ZeZnwW6wW0IGsEheMwknBeQboocM6cXu8hto1AYHOnjtg2t1RufWpsDn5aokuW/
RI8Hg5vnWWKAAAwcwkmg8aiN/1nYQG/coD41kXe/iJ1DTPZa2CPxgm71f2hRnEYT
c7uT7uueBapo1O+ttPkghsIvPZKc6vKxK0wrvzHGRoULl77Z83z92aoPLzcmnJ3d
MFEq4d7JQ5u5i+SaqqqOdp1RGAiuiNpcvyP9ew==
-----END CERTIFICATE-----
@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAulAVfzTe/mMl31NAx7P524sznQKmxG+BXfDPt4O778tBF76R
sEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm336ErFtqTwMoreY7WVCU2CBF
Ott2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqqm4hptSCepUjilmkReWQ+/N4+
RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV8JIj2RLZn85FTzMFTQk0Wu0Z
ugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjmkn9238we+2wBeRaceCvC7jyD
vYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwIDAQABAoIBADpNLSztQoqgRA2q
Y68aZqmI2dHcLotxyS24WYe3tWvIUso3XCeo/5sS2SUh8n0l0k/E12qi1TRac+P0
z8gh+F2HyqBNWv8EbDPlbSldzlyYlrs6/e75FiImsAf0F3qIrvnLVB/ZCk6mwGuC
LpVH310fNNwOx+ViG8LlF+KxZkJxzoKQ2RwiCwzMzpvNBTJyEE1jfqNlc92XnP65
FhjcFfzSJhFK3VH1gdpfO8bUiLiiUhzKzXH7Af73UqZ22wHeYx87ZJBv7e9ymbWT
GMf9js92e3OdXa3al75JlXgexSDmV2OdZNj6zpqAyupo5b+jXNxcxDaQCitOAcyU
H6HqMiECgYEAwWeEvOL/JC1hFBniM3jtG7ZcXjT1nuc0I9z+b0O6i3JXp1AXuxqU
COOn0udgJ4SJZZk2LOja7Mq6DsPvbPK9OA/XvSju6U/cqALpLdT+bvcG1J5km80w
F9d5a8CmABYsIzIm5VOYCZN/ELxo9uzDhNpiU1m7EVZengg8E1/xSpMCgYEA9pz/
SGZTFHdLZn7jgg9EzdnjZ2SlSnGc1tHayiRbHknwt8JFMwHeL/TPI6/4ns4A8l59
IEl1Zf8pWDhwa2qGITXQBmauLYzuPGSIBdABLnJQtE4r6o+vYafZxZVvTAv5B4Sz
TCWFkLYtvHvs71+u7IKS+dJg3EYy3Gx5KVhddb0CgYAr8QMdj018wLqvwHm+TBlD
FJnD5bBwnAMiqtE8Il091YrIvs/FePJtWpwEtQEJuXkmFjtS1Mz4w86mECpTzIrl
M+RGXAh8BeMSYSbtfNkaCRIKOLqPE317zT8PFkQg/OimTny72dRPSK2z9bq7b2u0
wZFZcqen9sGkkiZkGIZP9QKBgQDcgX6FVvD8QLqLl/OHLG3h/ewmW8irqrCJKDUQ
P7e1enmhZTSIqifoC2ZXS5XrMNmJ3VDWWLh/DcsDFdv3P9VUxpAN2SvukK/IEj/J
qrYTuKVOwwLjhbxUfkfrMnXEsoPl5BKJiJdH0I1OliRB2PVIhmwysphm/OGnU9p2
TIuspQKBgQCq5QJcny6CWHnFh/Q1faYqIjvaS4MqLfnDndvZ98abpcjik3AKgWof
iaROSk40L+q4uDuaM5tU1ufS/FS94hwlk2O1bQ/xgJBkNZnvZJRFU3oZjhggyl6G
iFtBTAGGtJqHTPMtn/Y6dUOJ/ZFIZWzuNhJGYX/S3ifpZeldKXmXew==
-----END RSA PRIVATE KEY-----
@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDAzCCAeugAwIBAgIILMPkLd2E/uAwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE
ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN
MTgwNTE5MTg0MTIxWjArMRcwFQYDVQQKEw5zeXN0ZW06bWFzdGVyczEQMA4GA1UE
AxMHa3ViZWxldDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALtz9mHo
tPkidPbQeu9RS6tAOQhAhPOzV7y5kxo9ZkyGR5mOJ5MElfoofHWGXDqJs3IHO6Zr
ZTKTYgX6c3jisMhIT62JnN9ZaATWcrd+qQ15ixTNhqdy3UcX6xlB8YF8KpVZ40rO
wrP/UsG9EaBit37iOmmINIkZtbNIhvOYhkJvr+NOtX/8TsnRZpT9PyCeyZJbsZIZ
d1Apfu2ENeS1C1OgOQIEOREBehc3GVH11D9BRtFob22MjZUjxyGj0SButUmpvnY9
ogfE5pT0yhI+kZlP6iMPkk0oGlkcc+U4X8VrSyYXfJNEbmI5aDZe3A4lk4fXiF/Y
NosbHYnzdf/j0acCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAIgaxO6aAyGRq
MINPID5bG/ZSRoIBSEX0bAviLKWP9RonjfayM8Xb3r2WZ4TmJoYYDNMRFoyCeStw
1fjl7b2vpmFBOxlpmRvNhRF1dlI9Rt4GRRVkxeS7c4dkc0LFTHEPp0X/RmSt4uf+
X9sYsWOGSBf52+qZ/7UNI6SYwoltenzbwnLHY9NSLXiVFommCXPaBma1GlkQN2F3
cEInhf78BXKXeIpWdZboHuWOUu3aoRT0p6fegb2Uxh2a73s6sToHjE7oy3H2ZvKR
kcFJ2TnKMrqzEK/9wyc/gu/kYVx8/zCoPlDQASem7aTZgOIDZ8wc4g9rBitnxdIs
jxZwjOKt9g==
-----END CERTIFICATE-----
@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEAu3P2Yei0+SJ09tB671FLq0A5CECE87NXvLmTGj1mTIZHmY4n
kwSV+ih8dYZcOomzcgc7pmtlMpNiBfpzeOKwyEhPrYmc31loBNZyt36pDXmLFM2G
p3LdRxfrGUHxgXwqlVnjSs7Cs/9Swb0RoGK3fuI6aYg0iRm1s0iG85iGQm+v4061
f/xOydFmlP0/IJ7Jkluxkhl3UCl+7YQ15LULU6A5AgQ5EQF6FzcZUfXUP0FG0Whv
bYyNlSPHIaPRIG61Sam+dj2iB8TmlPTKEj6RmU/qIw+STSgaWRxz5ThfxWtLJhd8
k0RuYjloNl7cDiWTh9eIX9g2ixsdifN1/+PRpwIDAQABAoIBAQCRpzJbs4DjUHXH
zgin6eg9AaMPGWr1HXZgC2YU7n6NmY0K8N0pLFgIz+qdOzBwv8xyHtKnpi001jZF
ZOzSknpAtYdL1XDST1s23xa2I7Hh6X47RNOLSwJLGnev4YBxV3STJgwpdWzuhcbd
CTcoA2yHJ+uxUodXvGVmEEXkA7DW7zLZpvLJ//nD5z5CM0IUPdaSgXhYQp2NZWtI
RjLdjkuYVyBYC2rU4LpmiH1eIVL7bDHoUQhOaHN0wSFG80o46gvrqbhrMPw7BwIu
bCW30q4Y4JPRYn5ru0zCForne65I2kRtnJUDjn99dOntWVZibRojY0hFFEyGYOjZ
WItzGAbxAoGBANFj2ZHitQxtqYs7MNIY9jz/7pzuPaX8dm+2/3WW5Aot01+s4yVH
pd7HE8l5NjnejWG7nG2GPsIhbCCVXEtSMGt1BRioKpc2dLq+ZQb75LGDMaJzMWEm
/HimJuhXvxOzzKC9Z29vo4d6JC58vPwyu27dFAv3rzAcdiWb/aib7S6ZAoGBAOUu
BePZgqlpwl3wqDlAljiLsH8AeZUH2rDA4n4d+1kKPMqJYMmftGaTkDJMeJfisfKb
EXcQsGJAeOLHRpY1VvkHqn5v+7qg9JHSnlw+/nTF5Vk6ISAFMs2Qfwdq6fZ898GZ
mi9VXr0hez7Z/v/liCxBcl0hgAhnjIFGvQ5rSmo/AoGBAIvlVFWdzCyTj/UQBNw6
BTpYHAoJOnMNq+uTrjXYLF+IonKHxfMAXZfsFhJDw7ECOh+UAz1BtehqAB387H7+
WI9SzabdpCcHIRIrZsA1x2O6LY1FvTYVoBTTnacaCPWW6R5zrQnM4sr/FfFhMbqm
AohdeKlOQGO6gE08XUsrclnxAoGBALOv+f5DtCaQPUzaO4toEGAVZjStcqZemiCr
mum3KDMPy8ohHDn5dcBXQl+thX/QxiSpYHAyLZlbY2yrQbTT7XUjhZHMy1nwiNEs
ie1ZlriH0OK8qOwqJ0L1YCO4t+gC415vyGwES1uOvMrysPSCStooFjre4Tu1tHxH
skNz68yRAoGBAJyMFoQu0rzOxCwQx+8m1encm9pcUvu2eSwwy+9460W474Ww4qZA
F4DWwjDg5dBG1Im21KIJkhoX579dh3QIRr3PRwlQUkQlxTrUSEtpfNTU3pvWV9BF
tuLS1TnOdweoQ8cGZZd9PWMLLrBd0JeR4FyH23rOUmMFwJ2A6OopeX6B
-----END RSA PRIVATE KEY-----
@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1OJQmE9JCI20h3BI/xJpQoNIfYviHIhlx6Al60Kv4Zb+taD+
Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJXo57H2X1QWNc+3RYzNEL2ra2rkCGw
q1jKGk6RofagbrinjAC9hGcm/V713fCdSpULH6Ruro9Kjvtca0nLjBcGC03pkuUi
1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7gecafjVN10kkMw9GKumQqUpejCtf3tTv
zzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY5DyFfrsL5xy0OHF4rA/EDSFkdEZ2
rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3olQwIDAQABAoIBAHSWjXUc1u6sTNZw
FEo9lxAqPiUj2u2tdbBicOHrX8+4lj56sTWkQAdjPQYTNtJALowzsGafQNdDiRkV
kfZXFtAxQVpHWx2MpI0If3p7wgVUO8Vv7gWpVuYZaYC+RRbeYkQ2k5RTufLBcv3d
rQcPoUvvDf7j0v2DhBXuEF/krBa70OnI6Fv5b6Tay4cN6vmNJSPUlDPvicCizmvV
WtAq5pkPfXW1uweMYDOSD10zaetclMae/0C1hahk9kGoLv49XnKCX/Luzwx0ShJL
F0Zk+0s9nmMAAfRL8JM7E9iwXa8I4zXpaNON5RfzdUQeU6puhNQrMExrfzFYWYVl
rPaRnqECgYEA4C7i9B08wR+JEbKeEvTTzUAS8W+S5lSkzPN75Tt4aHeTojzvRXa0
nUvbr+0PGctpa3OwDzh/SayKqkJvWzxWmzKELTsWkpUZLyx37oxkoQ+dUKSFDYF7
ejGYfqthUC65NA0rqmz6qiCK/RFXL1ihMY0f/74+IzChoiftpFQ0pt8CgYEA8xjn
jHcBpGmUOyKRWkmTM1x3l5NhT2bZYy5CGPXZ8tiu6zdi2gw2xUmgVIPzUnTDqmOH
NPuRvHv2sovqZsApDankwzsWthFLVFjPdpXjVa+Gvp6YN0FTeeIEjGujmCJ9Zj9b
oIk4o6gRzQNx5L/RaE2/oQrTGwlCWeA44pH6gh0CgYEA0KZSzOk5VnVHWZVo0jPT
vUBZYSR7EKzPBYHIWj3Tf0drvKACAiDNUWj8+uwkFdngMAXoYwIuVh+kn3pdsgii
gqetpXtNMvhaDDHTHc7FCbJCtH+q5jsQ9VWbnKldVQdnkC6B6YisdBL9yTOOdZ6D
yF6U3a3un0nv5cBLyZoltvkCgYEA5Aexc6ZSKQpMXGghlmK7rIsJN2qs9hFQy2Mh
503+oni1I7jxhf29BrT4qy6W+PrEa7kuo/lzDC3wDC2Is9d+6u05xBRSSnjQg49H
FEKnW8HpkDcuK26gwgzMHXf+nf+ER3wZE+6D7agDAp8/n8Z6xO9hWMvRmGPIFIxq
b8VlCdUCgYBgwfUsSsCMP8KVOJAuwf4/SWOkIUUQHQUj1CyEz2UWG5QiP2wqFiA7
IH8K8JsO9MSWq3ndR9kR+HGBCkJyyoD1GzBZeRhPb+69fYWao3lKUzEDqmxB7zjh
NPltbLlGGNbPhczXyJeSv1N94MUwY1wt0aAX6G+HiBI8a3cjC/cQPg==
-----END RSA PRIVATE KEY-----
@ -1,9 +0,0 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1OJQmE9JCI20h3BI/xJp
QoNIfYviHIhlx6Al60Kv4Zb+taD+Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJX
o57H2X1QWNc+3RYzNEL2ra2rkCGwq1jKGk6RofagbrinjAC9hGcm/V713fCdSpUL
H6Ruro9Kjvtca0nLjBcGC03pkuUi1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7geca
fjVN10kkMw9GKumQqUpejCtf3tTvzzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY
5DyFfrsL5xy0OHF4rA/EDSFkdEZ2rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3ol
QwIDAQAB
-----END PUBLIC KEY-----
16 example/gen-ca/ca-csr.json Normal file
@ -0,0 +1,16 @@
{
    "CN": "Kubernetes",
    "key": {
        "algo": "rsa",
        "size": 4096
    },
    "names": [
        {
            "C": "US",
            "L": "Saint Louis",
            "O": "Kubernetes",
            "OU": "CA",
            "ST": "Missouri"
        }
    ]
}
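This CSR config is the input for producing the example cluster CA. A minimal sketch of generating the CA pair with cfssl (assuming cfssl and cfssljson are on PATH; output basename is illustrative):

    # Produces cluster-ca.pem and cluster-ca-key.pem in the current directory.
    cfssl gencert -initca example/gen-ca/ca-csr.json | cfssljson -bare cluster-ca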
129 example/vagrant-config.yaml Normal file
@ -0,0 +1,129 @@
---
network:
  cluster_domain: cluster.local
  cluster_dns: 10.96.0.10
  kube_service_ip: 10.96.0.1
  pod_ip_cidr: 10.97.0.0/16
  service_ip_cidr: 10.96.0.0/16

nodes:
  n0:
    ip: 192.168.77.10
    roles:
      - master
      - genesis
    additional_labels:
      - beta.kubernetes.io/arch=amd64
  n1:
    ip: 192.168.77.11
    roles:
      - master
    additional_labels:
      - beta.kubernetes.io/arch=amd64
  n2:
    ip: 192.168.77.12
    roles:
      - master
    additional_labels:
      - beta.kubernetes.io/arch=amd64
  n3:
    ip: 192.168.77.13
    roles:
      - worker
    additional_labels:
      - beta.kubernetes.io/arch=amd64

pki:
  cluster-ca: |-
    -----BEGIN CERTIFICATE-----
    MIIDzjCCAragAwIBAgIUKwePtKtZf/KbwdhRke8d38V294IwDQYJKoZIhvcNAQEL
    BQAwbTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE1pc3NvdXJpMRQwEgYDVQQHEwtT
    YWludCBMb3VpczETMBEGA1UEChMKS3ViZXJuZXRlczELMAkGA1UECxMCQ0ExEzAR
    BgNVBAMTCkt1YmVybmV0ZXMwHhcNMTcwNjEzMTY1NzAwWhcNMjIwNjEyMTY1NzAw
    WjBtMQswCQYDVQQGEwJVUzERMA8GA1UECBMITWlzc291cmkxFDASBgNVBAcTC1Nh
    aW50IExvdWlzMRMwEQYDVQQKEwpLdWJlcm5ldGVzMQswCQYDVQQLEwJDQTETMBEG
    A1UEAxMKS3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
    AO8vjAoGyv6KigTnF6WZMoskzfCC2ZsLT22y457/irOe2EYazHbeXz/7Jlb8LwWn
    uMSaGlu/x5XfF3VGlMkq392S2CsfqLOO8AjUTn4YGOUx5IU++hh3SQ+cFrv/CF5l
    jeeXZGoSbMLhMvaWc3MHGCTNktBe4Q+DyRCyw81fMH+2C9dZtjH+cKOBUvhcMIDW
    z7i3MJ0th23PLyd9ZwVHDgyqUkzaY/zTgmSk1V++VJ9BwIn41/J/bW8peqPW1/cq
    B8BX45SBgyKgVRkQGppsDH7+MqDFzU2ZWP0R1EtGz68+TJObtl6yHY/pj8ksg7T1
    uQgZXzMrzsVoQNkiBKpiau0CAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1Ud
    EwEB/wQIMAYBAf8CAQIwHQYDVR0OBBYEFOGG+QV7EZ7kGhFxzB3P+ve4MdCsMB8G
    A1UdIwQYMBaAFOGG+QV7EZ7kGhFxzB3P+ve4MdCsMA0GCSqGSIb3DQEBCwUAA4IB
    AQCvth3gmSivlS+6dBuoKxK52pqzqtVCMr3YSqc1ORsWh6FQA+2M2ZSHKgfgkqfK
    WaDkgV0FZl5IIQ2t3V8ZQEj+WI2crnoR6cTTz+vXOJXm780IpH717d3PTYKBv4sU
    t8BpNhePPNeH7ZrW5P9+EVZ0ZFPSICbI9k8MFGlSJp5zgM6sinXmRaK59cnfBgEc
    cCnjvuY/BzNIiABBSsg8Pj2hOduIVK0xP3DnqGkPV5BEQP/dmhe81CG1v6WQZpev
    qC+jSvZYETWMg3sCQoyYveBRBce9vo94VqcA99FNnDoYsf16dZnKO6mP8rta21zp
    O1G/5Sc5HA/MvMldKvLrtqG4
    -----END CERTIFICATE-----

  cluster-ca-key: |-
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpQIBAAKCAQEA7y+MCgbK/oqKBOcXpZkyiyTN8ILZmwtPbbLjnv+Ks57YRhrM
    dt5fP/smVvwvBae4xJoaW7/Hld8XdUaUySrf3ZLYKx+os47wCNROfhgY5THkhT76
    GHdJD5wWu/8IXmWN55dkahJswuEy9pZzcwcYJM2S0F7hD4PJELLDzV8wf7YL11m2
    Mf5wo4FS+FwwgNbPuLcwnS2Hbc8vJ31nBUcODKpSTNpj/NOCZKTVX75Un0HAifjX
    8n9tbyl6o9bX9yoHwFfjlIGDIqBVGRAammwMfv4yoMXNTZlY/RHUS0bPrz5Mk5u2
    XrIdj+mPySyDtPW5CBlfMyvOxWhA2SIEqmJq7QIDAQABAoIBAQCwCyLbTlyiNH2Z
    Vi2FaNhWqWQaHXTkNNLlPsFiCVuhEMzF7HuJEeqxQLzbUQma8/N+YJ394Y2YtXai
    jqx7096pSqdoNgkI/6+UEA8lp77LEonLuKqCz2kq4Aurmu4h7EUhq7/wglciqHXG
    IL4gb5xJmjTwwKSNssWOUMTkp6celwakyzh1w+Sgo0qRKu75RtdkBnaLd2i8DI9F
    N0v9aMO8zC317DVhTBw2Wl6ZK2P2kdh2BB54NPrRm8edfViz5p7oq/Fs3YHC6+Hn
    cJMU87Wkxi/tbs2YKdnQraokLK40EpdDOsokW/IguHanvY55VTllzT9o5lEvsFCA
    u0ZOasSBAoGBAPjDGgNkZP8WcmxhRFQRdaNn5/37g0I7mspdELNA8/7hPJGn9BCK
    r+Ozf6LSjW6m2XVmluyCJSU/HbETfz1lo5HHUCV6uyIZHuHRF0ORovGTZJFSzYzL
    WFs5JLe6dXwS096oxq2knWaVEocNbUOue2Ptui1izNlQ7yDFeS27VJ95AoGBAPYl
    Ha7ZbAsY5M7VIzJTie3dt6QnWs8qfd7pV3IUrAuCjYSDOBUWommV1Mbxw2SyYntf
    AvXBIbuzsbpFsjKEypyyud0XNj3hNFI1xAJKdAF213zQYs4nZZnI5YST7GGDEGwP
    jCBm1MKLzHyUJ2ip1hc5zEM11hA8OsvK0vvyuIYVAoGBAI4sc6Gcr1xbJ+ppbPPf
    RqytphmytcIU7tLZfcH1TX5OnJ9irksF+KDa5gfY7pxfH8nJaFijyTcQa5fY3M/q
    VyHqGBRToMBMOyo0pmcnxUjsRH4KJRBi54y7jBC1sI/I8u4+5842Vv9aE8y8D8au
    4jaql814ujs51nGUaz2H40WBAoGBAO+zM1XLu7CO3HsjCjR/L8mpaaV9AazO92a1
    m4en4+cNitzpoBrBQQZLd7sJQrt0D/2Oh+Zk3oHYuxHnv2H8+QZh8igA67yU7AvG
    +gs1EAVBAxY0JJQXv5RkFEboeoB3Tu28sjv3h+ewlkEXUc1V3vwdN/KXoc+Lp8I/
    0Piz5MgFAoGAJQMFyA6sU03vW9nmmZuT5SYOgDm/JpJ9/fSwCxtmOxlRjzxL0G/l
    OhnsGBv1NbtoDZ+YMYY/0BhOhv6yzIJMCDmi5yuCw0FysL4pAaW40NKiMtZSOBdH
    ZuATA+uF7kV7K+NbO7FT0knfNjFkk9jVbjq+To3D3/FbVxS9VTbu9nk=
    -----END RSA PRIVATE KEY-----

  sa: |-
    -----BEGIN PUBLIC KEY-----
    MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6jYQ1LKjd/s7bcgxlw1o
    RR91Vb0MnSCUA4OSzJ5Hh0x8gOpllMpbeRdY4X605aOjYwku1Xlc9HFtjxMSDxjR
    jDaLQnVy+stNScFuOLn5VfWtgHJ68WlgZSzIjxveDGVFw2YguQMj8vMPNeCq2EAc
    /VFBWUXdNUC8/ipn2T4VA7DSjkZheNhHwigPIlS/kumfSXiIshMLM0P+Yx0wp72D
    vqp93C4523COw2DTyiv4azUYIGHBkyWtgfIES4gavxp2oFgvxcPvl1Y7XuHJzH0g
    ncZJVJS5o0WPFUzRlipyyZa0CxDKFkOy3pLQDEvn2mb5zL1rzd58kQowmLtP1aX7
    mQIDAQAB
    -----END PUBLIC KEY-----

  sa-key: |-
    -----BEGIN RSA PRIVATE KEY-----
    MIIEogIBAAKCAQEA6jYQ1LKjd/s7bcgxlw1oRR91Vb0MnSCUA4OSzJ5Hh0x8gOpl
    lMpbeRdY4X605aOjYwku1Xlc9HFtjxMSDxjRjDaLQnVy+stNScFuOLn5VfWtgHJ6
    8WlgZSzIjxveDGVFw2YguQMj8vMPNeCq2EAc/VFBWUXdNUC8/ipn2T4VA7DSjkZh
    eNhHwigPIlS/kumfSXiIshMLM0P+Yx0wp72Dvqp93C4523COw2DTyiv4azUYIGHB
    kyWtgfIES4gavxp2oFgvxcPvl1Y7XuHJzH0gncZJVJS5o0WPFUzRlipyyZa0CxDK
    FkOy3pLQDEvn2mb5zL1rzd58kQowmLtP1aX7mQIDAQABAoIBADdEhNo8QVjpvw9b
    41/auRU+pCiUUOqvKl5d6QFCBG0H/oVJSqk+yzEa8k1b4gIiiEaxfwy+89F3Brxx
    apyHZcNph5kqL/TAjr9t1r2qHQ1MySF7YkmfbTDSzYz/rXlNWJYQfn5KIGyPMLKt
    DoOzNWQNjZcsZlPPsAlmJlVcUgcpeiPKEGYBwi/Xfp7kJZjr+jxn3U/VImiDBuA/
    ipdqfzUsQc363mSnRCHGptmv3TBJh4TXpuoxAkjEryKhXDTjsDGWt4hqZJBZiF0I
    eGAnhvignqle+fkTGwszUrz/8PMAdWUGeTQ/DsWcUUgGzbu7Q1libFo0mj+BA9fM
    Y9De4wECgYEA97UDxjZX58RHTedpnUQFgg64ZPmKMYe9nQHvRCw1/9SRoUN/1zid
    Zaz+IbNvjpBpwBwhxg1ISG0Wo02iMlbtOXsJnmE9o45FnyH/8uDfxj93pklaopxY
    1GwGnR4q8xgUxol7rbL5mHBbcwXxAbU7uCFlTKmXEs5SzvJflMBCaqECgYEA8g1i
    QPFSCdqXVRRm/u6js62QYyitXQLrlQWhr2Jd1vxD4ngPRE0mR3qe37RldcEO6x8Y
    zeurj5g1pZFZOOcLZvBSE0TxFYMtsxa+42huAgUOs9RKtDfjgcMCRTcuCBQkpGXb
    hpVPUTpm/VcAmoUYu1frFoo/0vkS3e/JLCPDJfkCgYB9Q+cSt6ygohvFA7/fLeTz
    LmqFdcQy5Ag5fB75hLoSE/dJbA8cUZ8XKfKiLFG/8Lvp0NArjc/+AFywXLQnbNou
    dVAZ7ebz7SC8Jr9+ncXMRZBGYVYaYaJyWebGUdk6cfUfqasH3jhmpHs6ociNKo92
    wDywFhs2AWzTBrLbUJbFwQKBgAam2YFhYFjG+gurgN0Wn8cLSQGAl6sLrn+s5PGV
    6XBEBHWPyROebyPducn6AiPHR2qssxjNlixfCXJgWSxYJRcSGZ9P8LQfo7zdLie/
    se46R1onxlnHg2gIfOJ8DrbIHu2pouvC5Kgdy8DAiFK2v6Q+WUaITBK3J46TzVp6
    LR25AoGAJF0PwL19DWsJq/lfftgaUqSBwgdJh4ene+lvatdAfFZ1D6LUE+wUXXd+
    EyVxLnvg4Yp2j0ZxTPc2Bv/9/H/Rso79kdZgyt/cSA+FpgZRTy/zKl7BsNnJxgQJ
    cpNottrjMWgRXrbmTkqmqUtkqc31HMTmZ3U1Fum/uh0sEOv7Rd0=
    -----END RSA PRIVATE KEY-----
38 genesis.sh Executable file
@ -0,0 +1,38 @@
#!/usr/bin/env bash

if [ "$(id -u)" != "0" ]; then
    echo "This script must be run as root." 1>&2
    exit 1
fi


set -ex

mkdir -p /etc/docker
cat <<EOS > /etc/docker/daemon.json
{
    "live-restore": true,
    "storage-driver": "overlay2"
}
EOS

export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get install -y -qq --no-install-recommends \
    docker.io


if [ -f "${PROMENADE_LOAD_IMAGE}" ]; then
    echo === Loading updated promenade image ===
    docker load -i "${PROMENADE_LOAD_IMAGE}"
fi

docker run -t --rm \
    --net host \
    -v /:/target \
    quay.io/attcomdev/promenade:experimental \
        promenade \
            -v \
            genesis \
            --hostname $(hostname) \
            --config-path /target$(realpath $1) 2>&1
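genesis.sh takes the cluster configuration file path as its only argument and must run as root (per the id check at the top). A sketch of the expected invocation on the genesis node, assuming the example config above:

    # Hypothetical run on node n0:
    sudo ./genesis.sh example/vagrant-config.yaml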
37 join.sh Executable file
@ -0,0 +1,37 @@
#!/usr/bin/env bash

if [ "$(id -u)" != "0" ]; then
    echo "This script must be run as root." 1>&2
    exit 1
fi


set -ex

mkdir -p /etc/docker
cat <<EOS > /etc/docker/daemon.json
{
    "live-restore": true,
    "storage-driver": "overlay2"
}
EOS

export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get install -y -qq --no-install-recommends \
    docker.io


if [ -f "${PROMENADE_LOAD_IMAGE}" ]; then
    echo === Loading updated promenade image ===
    docker load -i "${PROMENADE_LOAD_IMAGE}"
fi

docker run -t --rm \
    -v /:/target \
    quay.io/attcomdev/promenade:experimental \
        promenade \
            -v \
            join \
            --hostname $(hostname) \
            --config-path /target$(realpath $1)
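join.sh mirrors genesis.sh but runs the join subcommand; a sketch of the expected invocation on each additional node, using the same config file:

    # Hypothetical run on nodes n1..n3:
    sudo ./join.sh example/vagrant-config.yaml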
@ -1,26 +0,0 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://kubernetes.io/docs/admin/kubelet/

[Service]
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStart=/usr/local/bin/kubelet \
    --kubeconfig=/etc/kubernetes/kubeconfig \
    --require-kubeconfig \
    --cni-conf-dir=/etc/cni/net.d \
    --cni-bin-dir=/opt/cni/bin \
    --network-plugin=cni \
    --lock-file=/var/run/lock/kubelet.lock \
    --exit-on-lock-contention \
    --pod-manifest-path=/etc/kubernetes/manifests \
    --allow-privileged \
    --cluster_dns=192.168.1.70,8.8.8.8,10.3.0.10 \
    --cluster_domain=cluster.local \
    --node-labels=node-role.kubernetes.io/canal-node=true,node-role.kubernetes.io/master= \
    --hostname-override=${NODE_HOSTNAME} \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
0 promenade/__init__.py Normal file
13 promenade/assets.py Normal file
@ -0,0 +1,13 @@
from promenade import logging
import os
import subprocess

__all__ = ['rsync']


LOG = logging.getLogger(__name__)


def rsync(*, src, dest):
    LOG.info('Syncing assets from "%s" to "%s".', src, dest)
    subprocess.run(['/usr/bin/rsync', '-r', os.path.join(src, ''), dest], check=True)
59 promenade/cli.py Normal file
@ -0,0 +1,59 @@
from . import logging, operator
import click

__all__ = []


LOG = logging.getLogger(__name__)


@click.group()
@click.option('-v', '--verbose', is_flag=True)
def promenade(*, verbose):
    logging.setup(verbose=verbose)


@promenade.command(help='Initialize a new cluster on one node')
@click.option('-a', '--asset-dir', default='/assets',
              type=click.Path(exists=True, file_okay=False,
                              dir_okay=True, resolve_path=True),
              help='Source path for binaries to deploy.')
@click.option('-c', '--config-path',
              type=click.Path(exists=True, file_okay=True,
                              dir_okay=False, resolve_path=True),
              help='Location of cluster configuration data.')
@click.option('--hostname', help='Current hostname.')
@click.option('-t', '--target-dir', default='/target',
              type=click.Path(exists=True, file_okay=False,
                              dir_okay=True, resolve_path=True),
              help='Location where templated files will be placed.')
def genesis(*, asset_dir, config_path, hostname, target_dir):
    op = operator.Operator.from_config(config_path=config_path,
                                       hostname=hostname,
                                       target_dir=target_dir)

    op.genesis(asset_dir=asset_dir)


@promenade.command(help='Join an existing cluster')
@click.option('-a', '--asset-dir', default='/assets',
              type=click.Path(exists=True, file_okay=False,
                              dir_okay=True, resolve_path=True),
              help='Source path for binaries to deploy.')
@click.option('-c', '--config-path',
              type=click.Path(exists=True, file_okay=True,
                              dir_okay=False, resolve_path=True),
              help='Location of cluster configuration data.')
@click.option('--hostname', help='Current hostname.')
@click.option('-t', '--target-dir', default='/target',
              type=click.Path(exists=True, file_okay=False,
                              dir_okay=True, resolve_path=True),
              help='Location where templated files will be placed.')
def join(*, asset_dir, config_path, hostname, target_dir):
    op = operator.Operator.from_config(config_path=config_path,
                                       hostname=hostname,
                                       target_dir=target_dir)

    op.join(asset_dir=asset_dir)
106 promenade/config.py Normal file
@ -0,0 +1,106 @@
from . import logging
from operator import itemgetter
import itertools
import yaml

__all__ = ['load_config_file']


LOG = logging.getLogger(__name__)


def load_config_file(*, config_path, hostname):
    LOG.debug('Loading genesis configuration from "%s"', config_path)
    cluster_data = yaml.load(open(config_path))
    LOG.debug('Loaded genesis configuration from "%s"', config_path)
    node_data = extract_node_data(hostname, cluster_data)

    return {
        'cluster_data': cluster_data,
        'node_data': node_data,
    }


def extract_node_data(hostname, cluster_data):
    genesis = _extract_genesis_data(cluster_data['nodes'])
    masters = _extract_master_data(cluster_data['nodes'])
    return {
        'cluster': cluster_data['nodes'],
        'current_node': _extract_current_node_data(cluster_data['nodes'],
                                                   hostname),
        'etcd': _extract_etcd_data(hostname, genesis, masters),
        'genesis': genesis,
        'masters': masters,
        'network': cluster_data['network'],
    }


def _extract_etcd_data(hostname, genesis, masters):
    LOG.info('hostname=%r genesis=%r masters=%r',
             hostname, genesis, masters)
    non_genesis_masters = [d for d in masters if d['hostname'] != genesis['hostname']]
    boot_order = [genesis] + sorted(non_genesis_masters, key=itemgetter('hostname'))

    result = {
        'boot_order': boot_order,
        'env': {},
    }

    peers = []
    for host in boot_order:
        peers.append(host)
        if host['hostname'] == hostname:
            break

    result['env']['ETCD_INITIAL_CLUSTER'] = ','.join(
        '%s=https://%s:2380' % (p['hostname'], p['hostname'])
        for p in peers)

    if hostname == genesis['hostname']:
        result['env']['ETCD_INITIAL_CLUSTER_STATE'] = 'new'
    else:
        result['env']['ETCD_INITIAL_CLUSTER_STATE'] = 'existing'

    return result


def _extract_current_node_data(nodes, hostname):
    base = nodes[hostname]
    return {
        'hostname': hostname,
        'labels': _extract_node_labels(base),
        **base,
    }


ROLE_LABELS = {
    'genesis': [
        'promenade=genesis',
    ],
    'master': [
        'node-role.kubernetes.io/master=',
    ],
}


def _extract_node_labels(data):
    labels = set(itertools.chain.from_iterable(
        map(lambda k: ROLE_LABELS.get(k, []), ['common'] + data['roles'])))
    labels.update(data.get('additional_labels', []))
    return sorted(labels)


def _extract_genesis_data(nodes):
    for hostname, node in nodes.items():
        if 'genesis' in node['roles']:
            return {
                'hostname': hostname,
                'ip': node['ip'],
            }


def _extract_master_data(nodes):
    return sorted(({'hostname': hostname, 'ip': node['ip']}
                   for hostname, node in nodes.items()
                   if 'master' in node['roles']),
                  key=itemgetter('hostname'))
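_extract_etcd_data builds an incremental boot order: each master's initial etcd cluster contains only the genesis node, the masters that sort before it, and itself. A sketch of the environment it would derive for node n1 under the example vagrant-config.yaml above:

    ETCD_INITIAL_CLUSTER=n0=https://n0:2380,n1=https://n1:2380
    ETCD_INITIAL_CLUSTER_STATE=existing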
24 promenade/etcd.py Normal file
@ -0,0 +1,24 @@
from . import kube, logging

__all__ = ['add_member']


LOG = logging.getLogger(__name__)


def add_member(exec_pod, hostname, port):
    opts = ' '.join([
        '--cacert',
        '/etc/etcd-pki/cluster-ca.pem',
        '--cert',
        '/etc/etcd-pki/etcd.pem',
        '--key',
        '/etc/etcd-pki/etcd-key.pem',
    ])
    result = kube.kc('exec', '-n', 'kube-system', '-t', exec_pod, '--', 'sh', '-c',
                     'ETCDCTL_API=3 etcdctl %s member add %s --peer-urls https://%s:%d'
                     % (opts, hostname, hostname, port))
    if result.returncode != 0:
        LOG.error('Failed to add etcd member. STDOUT: %r', result.stdout)
        LOG.error('Failed to add etcd member. STDERR: %r', result.stderr)
        result.check_returncode()
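add_member shells out to etcdctl inside the genesis etcd pod over TLS. The assembled command looks roughly like the following (certificate paths from the opts list above; the member hostname is a hypothetical example):

    ETCDCTL_API=3 etcdctl --cacert /etc/etcd-pki/cluster-ca.pem \
        --cert /etc/etcd-pki/etcd.pem --key /etc/etcd-pki/etcd-key.pem \
        member add n1 --peer-urls https://n1:2380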
27 promenade/kube.py Normal file
@ -0,0 +1,27 @@
from . import logging
import subprocess
import time

__all__ = ['kc', 'wait_for_node']


LOG = logging.getLogger(__name__)


def wait_for_node(node):
    repeat = True
    while repeat:
        result = kc('get', 'nodes', node, '-o',
                    r'jsonpath={.status.conditions[?(@.type=="Ready")].status}')
        if result.stdout == b'True':
            repeat = False
        else:
            LOG.debug('Node "%s" not ready, waiting. stdout=%r stderr=%r',
                      node, result.stdout, result.stderr)
            time.sleep(5)


def kc(*args):
    return subprocess.run(['/target/usr/local/bin/kubectl',
                           '--kubeconfig', '/target/etc/kubernetes/genesis/kubeconfig.yaml', *args],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
15 promenade/logging.py Normal file
@ -0,0 +1,15 @@
import logging
from logging import getLogger

__all__ = ['getLogger', 'setup']


LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s:%(funcName)s [%(lineno)3d] %(message)s'


def setup(*, verbose):
    if verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(format=LOG_FORMAT, level=level)
77 promenade/operator.py Normal file
@ -0,0 +1,77 @@
from . import config, etcd, logging, kube, pki, renderer
import os
import subprocess

__all__ = ['Operator']


LOG = logging.getLogger(__name__)


class Operator:
    @classmethod
    def from_config(cls, *,
                    config_path,
                    hostname,
                    target_dir):
        return cls(hostname=hostname, target_dir=target_dir,
                   **config.load_config_file(config_path=config_path,
                                             hostname=hostname))

    def __init__(self, *, cluster_data, hostname, node_data, target_dir):
        self.cluster_data = cluster_data
        self.hostname = hostname
        self.node_data = node_data
        self.target_dir = target_dir

    def genesis(self, *, asset_dir=None):
        self.setup(asset_dir=asset_dir)
        self.expand_etcd_cluster()

    def join(self, *, asset_dir=None):
        self.setup(asset_dir=asset_dir)

    def setup(self, *, asset_dir):
        self.rsync_from(asset_dir)
        self.render()
        self.install_keys()

        self.bootstrap()

    def rsync_from(self, src):
        if src:
            LOG.debug('Syncing assets from "%s" to "%s".', src, self.target_dir)
            subprocess.run(['/usr/bin/rsync', '-r',
                            os.path.join(src, ''), self.target_dir],
                           check=True)
        else:
            LOG.debug('No source directory given for rsync.')

    def render(self):
        r = renderer.Renderer(node_data=self.node_data,
                              target_dir=self.target_dir)
        r.render()

    def install_keys(self):
        pki.generate_keys(initial_pki=self.cluster_data['pki'],
                          target_dir=self.target_dir)

    def bootstrap(self):
        LOG.debug('Running genesis script with chroot "%s"', self.target_dir)
        subprocess.run([os.path.join(self.target_dir, 'usr/sbin/chroot'),
                        self.target_dir,
                        '/bin/bash', '/usr/local/bin/bootstrap'],
                       check=True)

    def expand_etcd_cluster(self):
        for node in self.node_data['etcd']['boot_order'][1:]:
            LOG.info('Waiting for Node "%s" to be Ready', node['hostname'])
            kube.wait_for_node(node['hostname'])
            LOG.info('Node "%s" Ready. Adding to etcd cluster.', node['hostname'])
            etcd.add_member(self.genesis_etcd_pod, node['hostname'], port=2380)
        LOG.info('Finished expanding etcd cluster.')

    @property
    def genesis_etcd_pod(self):
        return 'kube-etcd-%s' % self.node_data['genesis']['hostname']
143 promenade/pki.py Normal file
@ -0,0 +1,143 @@
from promenade import logging
import os
import shutil
import subprocess
import tempfile

__all__ = ['generate_keys']


LOG = logging.getLogger(__name__)


CA_ONLY_MAP = {
    'cluster-ca': [
        'kubelet',
    ],
}


FULL_DISTRIBUTION_MAP = {
    'apiserver': [
        'apiserver',
    ],
    'apiserver-key': [
        'apiserver',
    ],
    'controller-manager': [
        'controller-manager',
    ],
    'controller-manager-key': [
        'controller-manager',
    ],
    'kubelet': [
        'kubelet',
    ],
    'kubelet-key': [
        'kubelet',
    ],
    'proxy': [
        'proxy',
    ],
    'proxy-key': [
        'proxy',
    ],
    'scheduler': [
        'scheduler',
    ],
    'scheduler-key': [
        'scheduler',
    ],

    'cluster-ca': [
        'admin',
        'apiserver',
        'asset-loader',
        'controller-manager',
        'etcd',
        'genesis',
        'kubelet',
        'proxy',
        'scheduler',
    ],
    'cluster-ca-key': [
        'controller-manager',
    ],

    'sa': [
        'apiserver',
    ],
    'sa-key': [
        'controller-manager',
    ],

    'etcd': [
        'etcd',
    ],
    'etcd-key': [
        'etcd',
    ],

    'admin': [
        'admin',
    ],
    'admin-key': [
        'admin',
    ],
    'asset-loader': [
        'asset-loader',
    ],
    'asset-loader-key': [
        'asset-loader',
    ],
    'genesis': [
        'genesis',
    ],
    'genesis-key': [
        'genesis',
    ],
}


def generate_keys(*, initial_pki, target_dir):
    if os.path.exists(os.path.join(target_dir, 'etc/kubernetes/cfssl')):
        with tempfile.TemporaryDirectory() as tmp:
            _write_initial_pki(tmp, initial_pki)

            _generate_certs(tmp, target_dir)

            _distribute_files(tmp, target_dir, FULL_DISTRIBUTION_MAP)


def _write_initial_pki(tmp, initial_pki):
    for filename, data in initial_pki.items():
        path = os.path.join(tmp, filename + '.pem')
        with open(path, 'w') as f:
            LOG.debug('Writing data for "%s" to path "%s"', filename, path)
            f.write(data)


def _generate_certs(dest, target):
    ca_config_path = os.path.join(target, 'etc/kubernetes/cfssl/ca-config.json')
    ca_path = os.path.join(dest, 'cluster-ca.pem')
    ca_key_path = os.path.join(dest, 'cluster-ca-key.pem')
    search_dir = os.path.join(target, 'etc/kubernetes/cfssl/csr-configs')
    for filename in os.listdir(search_dir):
        name, _ext = os.path.splitext(filename)
        LOG.info('Generating cert for %s', name)
        path = os.path.join(search_dir, filename)
        cfssl_result = subprocess.check_output([
            'cfssl', 'gencert', '-ca', ca_path, '-ca-key', ca_key_path,
            '-config', ca_config_path, '-profile', 'kubernetes', path])
        subprocess.run(['cfssljson', '-bare', name], cwd=dest,
                       input=cfssl_result, check=True)


def _distribute_files(src, dest, distribution_map):
    for filename, destinations in distribution_map.items():
        src_path = os.path.join(src, filename + '.pem')
        if os.path.exists(src_path):
            for destination in destinations:
                dest_dir = os.path.join(dest, 'etc/kubernetes/%s/pki' % destination)
                os.makedirs(dest_dir, exist_ok=True)
                shutil.copy(src_path, dest_dir)
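_generate_certs drives the usual cfssl signing pipeline; for each CSR config it is roughly equivalent to this sketch (kubelet used as an example CSR name):

    cfssl gencert -ca cluster-ca.pem -ca-key cluster-ca-key.pem \
        -config ca-config.json -profile kubernetes kubelet.json | cfssljson -bare kubelet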
59 promenade/renderer.py Normal file
@ -0,0 +1,59 @@
from . import logging
import jinja2
import os
import pkg_resources

__all__ = ['Renderer']


LOG = logging.getLogger(__name__)


class Renderer:
    def __init__(self, *, node_data, target_dir):
        self.data = node_data
        self.target_dir = target_dir

    @property
    def template_paths(self):
        return ['common'] + self.data['current_node']['roles']

    def render(self):
        for template_dir in self.template_paths:
            self.render_template_dir(template_dir)

    def render_template_dir(self, template_dir):
        source_root = pkg_resources.resource_filename(
            'promenade', os.path.join('templates', template_dir))
        LOG.debug('Searching for templates in: "%s"', source_root)
        for root, _dirnames, filenames in os.walk(source_root,
                                                  followlinks=True):
            for source_filename in filenames:
                source_path = os.path.join(root, source_filename)
                self.render_template_file(path=source_path,
                                          root=source_root)

    def render_template_file(self, *, path, root):
        base_path = os.path.relpath(path, root)
        target_path = os.path.join(self.target_dir, base_path)

        _ensure_path(target_path)

        LOG.debug('Templating "%s" into "%s"', path, target_path)

        env = jinja2.Environment(undefined=jinja2.StrictUndefined)

        with open(path) as f:
            template = env.from_string(f.read())
        rendered_data = template.render(**self.data)

        with open(target_path, 'w') as f:
            f.write(rendered_data)

        LOG.info('Installed "%s"', os.path.join('/', base_path))


def _ensure_path(path):
    base = os.path.dirname(path)
    os.makedirs(base, mode=0o775, exist_ok=True)
@ -0,0 +1,4 @@
{% for master in masters %}
host-record=kubernetes,{{ master['ip'] }}
host-record={{ master['hostname'] }},{{ master['ip'] }}
{% endfor %}
@ -0,0 +1,13 @@
{
    "signing": {
        "default": {
            "expiry": "8760h"
        },
        "profiles": {
            "kubernetes": {
                "usages": ["signing", "key encipherment", "server auth", "client auth"],
                "expiry": "8760h"
            }
        }
    }
}
@ -0,0 +1,16 @@
{
    "CN": "system:node:{{ current_node['hostname'] }}",
    "hosts": [
        "{{ current_node['hostname'] }}",
        "{{ current_node['ip'] }}"
    ],
    "names": [
        {
            "O": "system:nodes"
        }
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
@ -0,0 +1,11 @@
{
    "CN": "system:kube-proxy",
    "hosts": [
        "{{ current_node['hostname'] }}",
        "{{ current_node['ip'] }}"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
  - cluster:
      server: https://kubernetes
      certificate-authority: /etc/kubernetes/kubelet/pki/cluster-ca.pem
    name: kubernetes
contexts:
  - context:
      cluster: kubernetes
      user: kubelet
    name: kubelet@kubernetes
current-context: kubelet@kubernetes
kind: Config
preferences: {}
users:
  - name: kubelet
    user:
      client-certificate: /etc/kubernetes/kubelet/pki/kubelet.pem
      client-key: /etc/kubernetes/kubelet/pki/kubelet-key.pem
@ -0,0 +1,39 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    tier: node
    component: kube-proxy
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  containers:
    - name: kube-proxy
      image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
      command:
        - /hyperkube
        - proxy
        - --cluster-cidr={{ network.pod_ip_cidr }}
        - --hostname-override=$(NODE_NAME)
        - --kubeconfig=/etc/kubernetes/config/kubeconfig.yaml
        - --proxy-mode=iptables
        - --v=5
      env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
      securityContext:
        privileged: true
      volumeMounts:
        - name: config
          mountPath: /etc/kubernetes
          readOnly: true
  hostNetwork: true
  volumes:
    - name: config
      hostPath:
        path: /etc/kubernetes/proxy
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
  - cluster:
      server: https://kubernetes
      certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
    name: kubernetes
contexts:
  - context:
      cluster: kubernetes
      user: proxy
    name: proxy@kubernetes
current-context: proxy@kubernetes
kind: Config
preferences: {}
users:
  - name: proxy
    user:
      client-certificate: /etc/kubernetes/pki/proxy.pem
      client-key: /etc/kubernetes/pki/proxy-key.pem
@ -0,0 +1,24 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://kubernetes.io/docs/admin/kubelet/

[Service]
ExecStart=/usr/local/bin/kubelet \
    --allow-privileged=true \
    --cluster-dns={{ network.cluster_dns }} \
    --cluster-domain={{ network.cluster_domain }} \
    --cni-bin-dir=/opt/cni/bin \
    --cni-conf-dir=/etc/cni/net.d \
    --hostname-override={{ current_node.hostname }} \
    --kubeconfig=/etc/kubernetes/kubelet/kubeconfig.yaml \
    --network-plugin=cni \
    --node-ip={{ current_node.ip }} \
    --node-labels={{ current_node.labels | join(',') }} \
    --pod-manifest-path=/etc/kubernetes/kubelet/manifests \
    --require-kubeconfig=true \
    --v=5
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
11 promenade/templates/common/usr/local/bin/bootstrap Executable file
@ -0,0 +1,11 @@
#!/bin/bash

set -ex

export DEBIAN_FRONTEND=noninteractive

apt-get install -y -qq --no-install-recommends dnsmasq socat

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
@ -0,0 +1,140 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "{{ network.pod_ip_cidr }}",
      "Backend": {
        "Type": "vxlan"
      }
    }

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.7.1-amd64
          command:
            - /opt/bin/flanneld
            - --ip-masq
            - --kube-subnet-mgr
          securityContext:
            privileged: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
        - name: install-cni
          image: quay.io/coreos/flannel:v0.7.1-amd64
          command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
@ -1,70 +1,102 @@
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
data:
|
||||
upstreamNameservers: |-
|
||||
["8.8.8.8", "8.8.4.4"]
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
# replicas: not specified here:
|
||||
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
|
||||
# 2. Default is 1.
|
||||
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{ network.cluster_dns }}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 10%
|
||||
maxUnavailable: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- topologyKey: kubernetes.io/hostname
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: k8s-app
|
||||
operator: In
|
||||
values:
|
||||
- kube-dns
|
||||
containers:
|
||||
- name: kubedns
|
||||
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
|
||||
resources:
|
||||
# TODO: Set memory limits when we've profiled the container for large
|
||||
# clusters, then set request = limit to keep this container in
|
||||
# guaranteed class. Currently, this container falls into the
|
||||
# "burstable" category so the kubelet doesn't backoff from restarting it.
|
||||
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only set up the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        - --v=5
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 10053
          name: dns-local
@ -75,21 +107,28 @@ spec:
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
        livenessProbe:
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          initialDelaySeconds: 3
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 5
        args:
          timeoutSeconds: 5
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /kube-dns-config
          name: kube-dns-config
      - args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
@ -101,6 +140,19 @@ spec:
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        name: dnsmasq
        ports:
        - containerPort: 53
          name: dns
@ -108,49 +160,98 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
        - mountPath: /etc/k8s/dns/dnsmasq-nanny
          name: kube-dns-config
      - args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
          timeoutSeconds: 5
        name: sidecar
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default # Don't use cluster DNS.
      nodeSelector:
        node-role.kubernetes.io/master: ""
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: Default
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: kube-dns
      serviceAccountName: kube-dns
      terminationGracePeriodSeconds: 30
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: kube-dns-config
        configMap:
      - configMap:
          defaultMode: 420
          name: kube-dns
          optional: true
        name: kube-dns-config

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-dns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  verbs:
  - list
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-dns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-dns
subjects:
- kind: ServiceAccount
  name: kube-dns
  namespace: kube-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
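Once these kube-dns manifests are applied, a minimal smoke test is to resolve a cluster-internal name through the service above (a sketch; assumes a working admin kubeconfig, and the busybox image is an arbitrary choice for a throwaway pod):

# both replicas should be Running
kubectl --namespace kube-system get pods -l k8s-app=kube-dns
# resolve the API service through cluster DNS from a disposable pod
kubectl run dnstest --image=busybox --restart=Never --rm -it -- \
    nslookup kubernetes.default.svc.cluster.local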
@ -0,0 +1,70 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: helm
    name: tiller
  name: tiller-deploy
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helm
      name: tiller
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: helm
        name: tiller
    spec:
      containers:
      - env:
        - name: TILLER_NAMESPACE
          value: kube-system
        image: gcr.io/kubernetes-helm/tiller:v2.4.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /liveness
            port: 44135
            scheme: HTTP
          initialDelaySeconds: 1
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: tiller
        ports:
        - containerPort: 44134
          name: tiller
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readiness
            port: 44135
            scheme: HTTP
          initialDelaySeconds: 1
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: CriticalAddonsOnly
        operator: Exists
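With this deployment reconciled, the usual check is that a helm client matching the tiller image tag (v2.4.2 here) can reach the server (a sketch; assumes kubectl and helm are already on the PATH and pointed at the cluster):

# the deployment should report 1 available replica
kubectl --namespace kube-system get deployment tiller-deploy
# helm prints both client and server versions when tiller is reachable
helm version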
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
- cluster:
    server: https://kubernetes
    certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: asset-loader
  name: asset-loader@kubernetes
current-context: asset-loader@kubernetes
kind: Config
preferences: {}
users:
- name: asset-loader
  user:
    client-certificate: /etc/kubernetes/pki/asset-loader.pem
    client-key: /etc/kubernetes/pki/asset-loader-key.pem
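All of the kubeconfigs in this change follow the same clusters/contexts/users pattern, so each can be exercised directly with kubectl on a node that has the referenced PKI files (a sketch; the rendered file's on-disk path is illustrative):

kubectl --kubeconfig /etc/kubernetes/kubeconfig.yaml get nodes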
@ -0,0 +1,16 @@
{
  "CN": "asset-loader",
  "hosts": [
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}"
  ],
  "names": [
    {
      "O": "system:masters"
    }
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
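Each of these CSR documents is rendered by the templating code and handed to cfssl to mint a keypair signed by the cluster CA. A minimal sketch of that flow (file names are illustrative; the real paths are chosen by the key generation code):

# sign the rendered CSR with the cluster CA
cfssl gencert \
    -ca=cluster-ca.pem -ca-key=cluster-ca-key.pem \
    asset-loader-csr.json | cfssljson -bare asset-loader
# emits asset-loader.pem and asset-loader-key.pem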
@ -0,0 +1,16 @@
{
  "CN": "genesis",
  "hosts": [
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}"
  ],
  "names": [
    {
      "O": "system:masters"
    }
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
- cluster:
    server: https://127.0.0.1
    certificate-authority: /target/etc/kubernetes/genesis/pki/cluster-ca.pem
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: genesis
  name: genesis@kubernetes
current-context: genesis@kubernetes
kind: Config
preferences: {}
users:
- name: genesis
  user:
    client-certificate: /target/etc/kubernetes/genesis/pki/genesis.pem
    client-key: /target/etc/kubernetes/genesis/pki/genesis-key.pem
@ -0,0 +1,34 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: asset-loader
  namespace: kube-system
  labels:
    app: promenade
    component: asset-loader
spec:
  dnsPolicy: Default # Don't use cluster DNS.
  hostNetwork: true
  containers:
  - name: loader
    image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
    command:
    - /bin/bash
    - -c
    - |-
      set -x
      while true; do
        sleep 60
        /kubectl \
          --kubeconfig /etc/kubernetes/kubeconfig.yaml \
          apply -f /etc/kubernetes/assets
      done
    volumeMounts:
    - name: config
      mountPath: /etc/kubernetes
      readOnly: true
  volumes:
  - name: config
    hostPath:
      path: /etc/kubernetes/asset-loader
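The loader is deliberately simple: kubectl apply is idempotent, so re-applying the same directory every 60 seconds converges harmlessly, and anything dropped into the host directory later is picked up on the next pass. Staging a new asset is just a host-side copy (the manifest name is hypothetical; the directory follows from the hostPath volume above):

# stage an extra manifest for the loader's next pass
cp extra-addon.yaml /etc/kubernetes/asset-loader/assets/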
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
- cluster:
    server: https://kubernetes
    certificate-authority: /etc/kubernetes/admin/pki/cluster-ca.pem
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: admin@kubernetes
current-context: admin@kubernetes
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate: /etc/kubernetes/admin/pki/admin.pem
    client-key: /etc/kubernetes/admin/pki/admin-key.pem
@ -0,0 +1,16 @@
{
  "CN": "admin",
  "hosts": [
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}"
  ],
  "names": [
    {
      "O": "system:masters"
    }
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
@ -0,0 +1,17 @@
{
  "CN": "system:kube-apiserver",
  "hosts": [
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local",
    "127.0.0.1",
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}",
    "{{ network.kube_service_ip }}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
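Because the API server, etcd, and clients all validate hostnames against the SANs rendered here, a quick way to audit a minted certificate is to print its SAN block (a sketch; assumes openssl is available and the cert is in the current directory):

openssl x509 -in apiserver.pem -noout -text | grep -A 1 'Subject Alternative Name'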
@ -0,0 +1,11 @@
{
  "CN": "system:kube-controller-manager",
  "hosts": [
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
@ -0,0 +1,17 @@
{
  "CN": "etcd:{{ current_node['hostname'] }}",
  "hosts": [
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local",
    "127.0.0.1",
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}",
    "{{ network.kube_service_ip }}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
@ -0,0 +1,11 @@
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "{{ current_node['hostname'] }}",
    "{{ current_node['ip'] }}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
- cluster:
    server: https://kubernetes
    certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: controller-manager
  name: controller-manager@kubernetes
current-context: controller-manager@kubernetes
kind: Config
preferences: {}
users:
- name: controller-manager
  user:
    client-certificate: /etc/kubernetes/pki/controller-manager.pem
    client-key: /etc/kubernetes/pki/controller-manager-key.pem
@ -0,0 +1,46 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-apiserver
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
    command:
    - /hyperkube
    - apiserver
    - --advertise-address={{ current_node.ip }}
    - --authorization-mode=RBAC
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
    - --anonymous-auth=false
    - --client-ca-file=/etc/kubernetes/pki/cluster-ca.pem
    - --insecure-port=0
    - --bind-address=0.0.0.0
    - --secure-port=443
    - --allow-privileged=true
    - --etcd-servers=https://kubernetes:2379
    - --etcd-cafile=/etc/kubernetes/pki/cluster-ca.pem
    - --etcd-certfile=/etc/kubernetes/pki/apiserver.pem
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-key.pem
    - --service-cluster-ip-range={{ network.service_ip_cidr }}
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --service-account-key-file=/etc/kubernetes/pki/sa.pem
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem
    - --v=5
    volumeMounts:
    - name: config
      mountPath: /etc/kubernetes
      readOnly: true
  volumes:
  - name: config
    hostPath:
      path: /etc/kubernetes/apiserver
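With --insecure-port=0 and --anonymous-auth=false, every health check against this API server must present client credentials, so the simplest probe reuses the admin PKI material laid down above (a sketch; paths per the admin kubeconfig, and "kubernetes" is assumed to resolve to a master):

# expect the literal response "ok"
curl --cacert /etc/kubernetes/admin/pki/cluster-ca.pem \
     --cert /etc/kubernetes/admin/pki/admin.pem \
     --key /etc/kubernetes/admin/pki/admin-key.pem \
     https://kubernetes/healthz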
@ -0,0 +1,40 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-controller-manager
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  hostNetwork: true
  dnsPolicy: Default # Don't use cluster DNS.
  containers:
  - name: kube-controller-manager
    image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
    command:
    - ./hyperkube
    - controller-manager
    - --allocate-node-cidrs=true
    - --cluster-cidr={{ network.pod_ip_cidr }}
    - --cluster-signing-cert-file=/etc/kubernetes/pki/cluster-ca.pem
    - --cluster-signing-key-file=/etc/kubernetes/pki/cluster-ca-key.pem
    - --configure-cloud-routes=false
    - --leader-elect=true
    - --kubeconfig=/etc/kubernetes/kubeconfig.yaml
    - --root-ca-file=/etc/kubernetes/pki/cluster-ca.pem
    - --service-account-private-key-file=/etc/kubernetes/pki/sa-key.pem
    - --service-cluster-ip-range={{ network.service_ip_cidr }}
    - --use-service-account-credentials=true
    - --v=5
    volumeMounts:
    - name: config
      mountPath: /etc/kubernetes
      readOnly: true
  volumes:
  - name: config
    hostPath:
      path: /etc/kubernetes/controller-manager
@ -0,0 +1,68 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: kube-etcd
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-etcd
spec:
  hostNetwork: true
  containers:
  - name: k8s-etcd
    image: quay.io/coreos/etcd:v3.0.17
    env:
    - name: ETCD_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: ETCD_CLIENT_CERT_AUTH
      value: "true"
    - name: ETCD_PEER_CLIENT_CERT_AUTH
      value: "true"
    - name: ETCD_DATA_DIR
      value: /var/lib/kube-etcd
    - name: ETCD_TRUSTED_CA_FILE
      value: /etc/etcd-pki/cluster-ca.pem
    - name: ETCD_CERT_FILE
      value: /etc/etcd-pki/etcd.pem
    - name: ETCD_KEY_FILE
      value: /etc/etcd-pki/etcd-key.pem
    - name: ETCD_PEER_TRUSTED_CA_FILE
      value: /etc/etcd-pki/cluster-ca.pem
    - name: ETCD_PEER_CERT_FILE
      value: /etc/etcd-pki/etcd.pem
    - name: ETCD_PEER_KEY_FILE
      value: /etc/etcd-pki/etcd-key.pem
    - name: ETCD_ADVERTISE_CLIENT_URLS
      value: https://$(ETCD_NAME):2379
    - name: ETCD_INITIAL_ADVERTISE_PEER_URLS
      value: https://$(ETCD_NAME):2380
    - name: ETCD_INITIAL_CLUSTER_TOKEN
      value: promenade-kube-etcd-token
    - name: ETCD_LISTEN_CLIENT_URLS
      value: https://0.0.0.0:2379
    - name: ETCD_LISTEN_PEER_URLS
      value: https://0.0.0.0:2380
{%- for env_name, env_value in etcd['env'].items() %}
    - name: {{ env_name }}
      value: {{ env_value }}
{%- endfor %}
    ports:
    - name: client
      containerPort: 2379
    - name: peer
      containerPort: 2380
    volumeMounts:
    - name: data
      mountPath: /var/lib/kube-etcd
    - name: pki
      mountPath: /etc/etcd-pki
  volumes:
  - name: data
    hostPath:
      path: /var/lib/kube-etcd
  - name: pki
    hostPath:
      path: /etc/kubernetes/etcd/pki
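Since client and peer traffic both require TLS client auth here, etcdctl needs the same PKI material to talk to a member. A health-check sketch using the v3 client, with cert paths taken from the hostPath mount above:

ETCDCTL_API=3 etcdctl \
    --endpoints https://127.0.0.1:2379 \
    --cacert /etc/kubernetes/etcd/pki/cluster-ca.pem \
    --cert /etc/kubernetes/etcd/pki/etcd.pem \
    --key /etc/kubernetes/etcd/pki/etcd-key.pem \
    endpoint health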
@ -0,0 +1,29 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    tier: control-plane
    component: kube-scheduler
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
    command:
    - ./hyperkube
    - scheduler
    - --leader-elect=true
    - --kubeconfig=/etc/kubernetes/kubeconfig.yaml
    - --v=5
    volumeMounts:
    - name: config
      mountPath: /etc/kubernetes
  volumes:
  - name: config
    hostPath:
      path: /etc/kubernetes/scheduler
@ -0,0 +1,20 @@
---
apiVersion: v1
clusters:
- cluster:
    server: https://kubernetes
    certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: scheduler
  name: scheduler@kubernetes
current-context: scheduler@kubernetes
kind: Config
preferences: {}
users:
- name: scheduler
  user:
    client-certificate: /etc/kubernetes/pki/scheduler.pem
    client-key: /etc/kubernetes/pki/scheduler-key.pem
5
requirements-frozen.txt
Normal file
@ -0,0 +1,5 @@
click==6.7
Jinja2==2.9.6
MarkupSafe==1.0
pbr==3.0.1
PyYAML==3.12
4
requirements.txt
Normal file
@ -0,0 +1,4 @@
click==6.7
jinja2==2.9.6
pbr==3.0.1
pyyaml==3.12
@ -1,64 +0,0 @@
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function validate_environment {
    local ERRORS=

    if [ "x${NODE_HOSTNAME}" = "x" ]; then
        echo Error: NODE_HOSTNAME not defined, but required.
        ERRORS=1
    fi

    if ! docker info; then
        cat <<EOS
Error: Unable to run \`docker info\`. You must mount /var/run/docker.sock when
you run this container, since it is used to launch containers on the host:
    docker run -v /var/run/docker.sock:/var/run/docker.sock ...
EOS
        ERRORS=1
    fi

    if [ ! -d /target/etc/systemd/system ]; then
        cat <<EOS
Error: It appears that the host's root filesystem is not mounted at /target.
Make sure it is mounted:
    docker run -v /:/target ...
EOS
        ERRORS=1
    fi

    if [ "x$ERRORS" != "x" ]; then
        exit 1
    fi
}

function install_assets {
    mkdir /target/etc/kubernetes
    cp -R ./assets/* /target/etc/kubernetes
}

function install_cni {
    mkdir -p /opt/cni/bin
    tar xf cni.tgz -C /opt/cni/bin/
}

function install_kubelet {
    cp ./kubelet /target/usr/local/bin/kubelet

    cat ./kubelet.service.template | envsubst > /target/etc/systemd/system/kubelet.service
    chown root:root /target/etc/systemd/system/kubelet.service
    chmod 644 /target/etc/systemd/system/kubelet.service

    chroot --userspec root:root /target /bin/bash < ./scripts/start-kubelet.sh
}
@ -1,39 +0,0 @@
#!/bin/bash
#
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex

source ./scripts/env.sh
source ./scripts/func.sh

validate_environment
# XXX validate_genesis_assets

if [ -f "genesis_image_cache/genesis-images.tar" ]; then
    docker load -i ./genesis_image_cache/genesis-images.tar
else
    echo "Image cache not found. Skipping."
fi

install_assets
install_cni
install_kubelet

docker run --rm \
    -v /etc/kubernetes:/etc/kubernetes \
    quay.io/coreos/bootkube:${BOOTKUBE_VERSION} \
    /bootkube start \
    --asset-dir=/etc/kubernetes
22
scripts/entrypoint-join.sh → setup.cfg
Executable file → Normal file
@ -1,5 +1,3 @@
#!/bin/bash
#
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,14 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex
[metadata]
name = promenade

source ./scripts/env.sh
source ./scripts/func.sh
[pbr]
use-egg = false

validate_environment
# XXX validate_join_assets
[files]
packages =
    promenade
package-data =
    templates = templates/*

install_assets
install_cni
install_kubelet
[entry_points]
console_scripts =
    promenade = promenade.cli:promenade
13
scripts/common/start-kubelet.sh → setup.py
Executable file → Normal file
@ -1,5 +1,5 @@
#!/bin/bash
#
#!/usr/bin/env python

# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex
from setuptools import setup

systemctl daemon-reload
systemctl enable kubelet.service
systemctl start kubelet.service
setup(
    setup_requires=['pbr>=1.9', 'setuptools>=17.1'],
    pbr=True,
)
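With pbr wired up through setup.cfg, the package installs like any other Python project and exposes the console script declared above (a sketch of the developer flow from a checkout of this repo):

pip install -r requirements.txt
pip install -e .
promenade --help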
@ -1,20 +0,0 @@
#!/usr/bin/env bash

set -ex

# Setup master
vagrant ssh n0 <<EOS
set -ex
sudo docker load -i /vagrant/promenade-genesis.tar
sudo docker run -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=n0 quay.io/attcomdev/promenade-genesis:dev
EOS

# Join nodes
for node in n1 n2; do
  vagrant ssh $node <<EOS
set -ex
sudo docker load -i /vagrant/promenade-join.tar
# Should be: sudo docker run -v /:/target -e NODE_HOSTNAME=$node quay.io/attcomdev/promenade-join:dev
sudo docker run -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=$node quay.io/attcomdev/promenade-join:dev
EOS
done
@ -1,3 +0,0 @@
host-record=kubernetes,192.168.77.10
host-record=kubernetes,192.168.77.11
host-record=kubernetes,192.168.77.12
@ -1,3 +0,0 @@
{
  "dns": ["172.17.0.1"]
}