	new file:   files/X-csr.json
	new file:   files/admin-csr.json
	new file:   files/ca-config.json
	new file:   files/ca-csr.json
	new file:   inventory
	new file:   inventory.orig
	new file:   k8s-deploy.yaml
	new file:   k8s-deploy/00-k8s-requirements.yaml
	new file:   k8s-deploy/01-k8s-certs-create.yaml
	new file:   k8s-deploy/02-k8s-certs-copy.yaml
	new file:   k8s-deploy/03-k8s-deploy-etcd.yaml
	new file:   k8s-deploy/04-k8s-controller-deploy.yaml
	new file:   k8s-deploy/05-k8s-nginx-deploy.yaml
	new file:   k8s-deploy/06-k8s-workers-certs-deploy.yaml
	new file:   k8s-deploy/07-k8s-worker-deploy.yaml
	new file:   k8s-deploy/08-rbac-clusterrole-create.yaml
	new file:   k8s-deploy/09-k8s-create-remote-admin.yaml
	new file:   k8s-deploy/README.md
	new file:   k8s-deploy/core-dns.yaml
	new file:   k8s-deploy/defaults/main.yml
	new file:   k8s-deploy/files/99_loopback.conf
	new file:   k8s-deploy/files/X-csr.json
	new file:   k8s-deploy/files/admin-csr.json
	new file:   k8s-deploy/files/arm/etcd
	new file:   k8s-deploy/files/arm/etcdctl
	new file:   k8s-deploy/files/ca-config.json
	new file:   k8s-deploy/files/ca-csr.json
	new file:   k8s-deploy/files/clusterrole-api-to-kubelet.yaml
	new file:   k8s-deploy/files/clusterrolebinding-api-to-kubelet.yaml
	new file:   k8s-deploy/files/config.toml
	new file:   k8s-deploy/files/containerd.service
	new file:   k8s-deploy/files/kube-controller-manager-csr.json
	new file:   k8s-deploy/files/kube-proxy-csr.json
	new file:   k8s-deploy/files/kube-proxy.service
	new file:   k8s-deploy/files/kube-scheduler-csr.json
	new file:   k8s-deploy/files/kubelet.service
	new file:   k8s-deploy/files/kubernetes-csr.json
	new file:   k8s-deploy/files/service-account-csr.json
	new file:   k8s-deploy/files/x86_64/etcd
	new file:   k8s-deploy/files/x86_64/etcdctl
	new file:   k8s-deploy/handlers/main.yml
	new file:   k8s-deploy/k8s-deploy.yaml
	new file:   k8s-deploy/k8s-uninstall.yaml
	new file:   k8s-deploy/meta/main.yml
	new file:   k8s-deploy/tasks/k8s-certs-create.yaml
	new file:   k8s-deploy/tasks/main.yml
	new file:   k8s-deploy/templates/10_bridge.conf.j2
	new file:   k8s-deploy/templates/encryption-config.j2
	new file:   k8s-deploy/templates/etcd.service-amd64.j2
	new file:   k8s-deploy/templates/etcd.service-arm.j2
	new file:   k8s-deploy/templates/kube-apiserver.service.j2
	new file:   k8s-deploy/templates/kube-controller-manager.service.j2
	new file:   k8s-deploy/templates/kube-proxy-config.yaml.j2
	new file:   k8s-deploy/templates/kube-scheduler.service.j2
	new file:   k8s-deploy/templates/kube-scheduler.yaml.j2
	new file:   k8s-deploy/templates/kubelet-config.yaml.j2
	new file:   k8s-deploy/templates/kubernetes.default.svc.cluster.local.j2
	new file:   k8s-deploy/templates/worker-csr_json.j2
	new file:   k8s-deploy/test.yaml
	new file:   k8s-deploy/tests/inventory
	new file:   k8s-deploy/tests/test.yml
	new file:   k8s-deploy/vars/main.yml
	new file:   rollback_k8s-deploy.yaml
Initial commit
Jonny Ervine 2020-02-19 08:13:09 +00:00
parent bd11a30b86
commit 694af68fb1
63 changed files with 2059 additions and 0 deletions

files/X-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "system:node:NUMBER",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "system:nodes",
"OU": "Kubernetes - CentOS",
"ST": "Oregon"
}
]
}

files/admin-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:masters",
"OU": "Kubernetes via Ansible",
"ST": "Hong Kong"
}
]
}

files/ca-config.json Normal file

@ -0,0 +1,13 @@
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}

files/ca-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "Kubernetes",
"OU": "CA",
"ST": "Hong Kong"
}
]
}

inventory Normal file

@ -0,0 +1,13 @@
[kubernetes]
[masters]
tb-blue.kube.ipa.champion
[workers]
[others]
localhost
[all]
rpi-builder.ipa.champion
tb-blue.kube.ipa.champion

inventory.orig Normal file

@ -0,0 +1,12 @@
[kubernetes]
[masters]
debian-k8s-master1.ipa.champion
debian-k8s-master2.ipa.champion
[workers]
debian-k8s-node1.ipa.champion
debian-k8s-node2.ipa.champion
[others]
localhost

k8s-deploy.yaml Normal file

@ -0,0 +1,35 @@
---
- name: Playbook to automate a manual k8s installation
hosts: localhost
become: true
tasks:
- name: Download and install the cfssl utility
get_url:
url: https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
dest: /usr/local/bin/cfssl
mode: 0755
- name: Download and install the cfssljson utility
get_url:
url: https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
dest: /usr/local/bin/cfssljson
mode: 0755
- name: Put the seed key material files in place
file:
path: /var/tmp/kubernetes
state: directory
- copy:
src: files/{{ item }}
dest: /var/tmp/kubernetes/
with_items:
- ca-csr.json
- admin-csr.json
- ca-config.json
- name: Create the CA
shell: /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
args:
chdir: /var/tmp/kubernetes
- name: Create the admin KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | /usr/local/bin/cfssljson -bare admin
args:
chdir: /var/tmp/kubernetes
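
The two cfssl tasks above leave ca.pem, ca-key.pem, admin.pem and admin-key.pem under /var/tmp/kubernetes. A quick sanity check of that output could be appended as an extra task; this is only a sketch (not part of the commit) and assumes openssl is available on the control host:

    - name: Verify the admin certificate chains back to the freshly created CA (sketch)
      command: openssl verify -CAfile ca.pem admin.pem
      args:
        chdir: /var/tmp/kubernetes
      changed_when: false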

k8s-deploy/00-k8s-requirements.yaml Normal file

@ -0,0 +1,12 @@
---
- name: Pre-requisites for installing k8s
hosts: masters
become: true
tasks:
- name: Install required packages
apt:
name: "{{ item }}"
state: present
with_items:
- "python"
- "ca-certificates"

k8s-deploy/01-k8s-certs-create.yaml Normal file

@ -0,0 +1,130 @@
---
- name: Playbook to automate a manual k8s installation
hosts: localhost
vars:
worker_name:
- debian-k8s-node1
- debian-k8s-node2
server_name: "{{ item }}"
haproxy_addr: "192.168.11.58"
etcd_host1_ip: "192.168.11.167"
etcd_host2_ip: "192.168.11.94"
kube_cluster: "kubernetes"
become: true
tasks:
- name: Download and install the cfssl utility
get_url:
url: https://pkg.cfssl.org/R1.2/{{ item }}_linux-amd64
dest: /usr/local/bin/{{ item }}
mode: 0755
with_items:
- cfssl
- cfssljson
- name: Put the seed key material files in place
file:
path: /var/tmp/kubernetes
state: directory
- copy:
src: files/{{ item }}
dest: /var/tmp/kubernetes/
mode: preserve
with_items:
- ca-csr.json
- admin-csr.json
- ca-config.json
- kube-controller-manager-csr.json
- kube-proxy-csr.json
- kube-scheduler-csr.json
- kubernetes-csr.json
- service-account-csr.json
- template:
src: templates/worker-csr_json.j2
dest: /var/tmp/kubernetes/{{ item }}-csr.json
mode: preserve
with_items:
- "{{ worker_name }}"
- name: Create the CA
shell: /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
args:
chdir: /var/tmp/kubernetes
- name: Create the admin KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | /usr/local/bin/cfssljson -bare admin
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node certificates
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname={{ item }} -profile=kubernetes {{ item }}-csr.json | /usr/local/bin/cfssljson -bare {{ item }}
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-controller-manager KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | /usr/local/bin/cfssljson -bare kube-controller-manager
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-proxy KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | /usr/local/bin/cfssljson -bare kube-proxy
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | /usr/local/bin/cfssljson -bare kube-scheduler
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes cluster KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=10.32.0.1,"{{ etcd_host1_ip }}","{{ etcd_host2_ip }}","{{ haproxy_addr }}",127.0.0.1,kubernetes.default -profile=kubernetes kubernetes-csr.json | /usr/local/bin/cfssljson -bare kubernetes
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes service account KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-account-csr.json | /usr/local/bin/cfssljson -bare service-account
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node kubeconfig files
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig={{ item }}.kubeconfig; kubectl config set-credentials system:node:{{ item }} --client-certificate={{ item }}.pem --client-key={{ item }}-key.pem --embed-certs=true --kubeconfig={{ item }}.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:node:{{ item }} --kubeconfig={{ item }}.kubeconfig; kubectl config use-context default --kubeconfig={{ item }}.kubeconfig
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-proxy kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig=kube-proxy.kubeconfig; kubectl config set-credentials system:kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-proxy --kubeconfig=kube-proxy.kubeconfig; kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the controller-manager kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig; kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig; kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create admin kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=admin.kubeconfig; kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=admin.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=admin --kubeconfig=admin.kubeconfig; kubectl config use-context default --kubeconfig=admin.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create data encryption key
shell: head -c 32 /dev/urandom | base64
register: enc_key
- name: Generate the encryption file
template:
src: templates/encryption-config.j2
dest: /var/tmp/kubernetes/encryption-config.yaml
- name: Set the owner of files to be ansible
file:
path: /var/tmp/kubernetes
owner: jonny
recurse: true
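
One caveat with the shell-based cfssl tasks above: every run regenerates the CA and, with it, every certificate signed by it. If that is not desired, the shell module's creates argument can short-circuit a task once its output exists. A sketch, using ca.pem as the marker file:

    - name: Create the CA (skipped when ca.pem already exists)
      shell: /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
      args:
        chdir: /var/tmp/kubernetes
        creates: /var/tmp/kubernetes/ca.pem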

k8s-deploy/02-k8s-certs-copy.yaml Normal file

@ -0,0 +1,51 @@
---
- name: Copy necessary files to controllers
hosts: masters
vars:
kube_files:
- ca.pem
- ca-key.pem
- kubernetes-key.pem
- kubernetes.pem
- service-account.pem
- service-account-key.pem
- kube-controller-manager.kubeconfig
- kube-scheduler.kubeconfig
- encryption-config.yaml
etcd_files:
- ca.pem
- kubernetes-key.pem
- kubernetes.pem
become: true
tasks:
- name: Create etcd directories
file:
path: /etc/etcd
state: directory
- name: Create var lib kubernetes directory
file:
path: /var/lib/kubernetes
state: directory
- name: Copy files to kubernetes directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /var/lib/kubernetes/{{ item }}
mode: preserve
owner: root
group: root
with_items:
- "{{ kube_files }}"
- name: Copy files to etcd directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /etc/etcd/{{ item }}
mode: preserve
owner: root
group: root
with_items:
- "{{ etcd_files }}"
- name: Copy admin.kubeconfig to ansible home directory
copy:
src: /var/tmp/kubernetes/admin.kubeconfig
dest: /home/ansible/admin.kubeconfig

k8s-deploy/03-k8s-deploy-etcd.yaml Normal file

@ -0,0 +1,58 @@
---
- name: Setting up etcd on the controller nodes
hosts: masters
vars:
etcd_name: kube_etcd
etcd_host1: "debian-k8s-master1"
etcd_host2: "debian-k8s-master2"
etcd_host1_ip: "192.168.11.167"
etcd_host2_ip: "192.168.11.94"
kube_arch: "{{ 'arm' if ansible_architecture == 'armv7l' else 'amd64' }}"
become: true
tasks:
- name: Copy the etcd binary
copy:
src: files/x86_64/{{ item }}
dest: /usr/local/bin/
mode: 0755
with_items:
- etcd
- etcdctl
when:
- ansible_architecture == "x86_64"
- name: Copy the etcd binary
copy:
src: files/arm/{{ item }}
dest: /usr/local/bin/
mode: 0755
with_items:
- etcd
- etcdctl
when:
- ansible_architecture == "armv7l"
- name: Creating the etcd service file
template:
src: templates/etcd.service-{{ kube_arch }}.j2
dest: /etc/systemd/system/etcd.service
- name: Create the etcd var directory
file:
path: /var/lib/etcd
state: directory
- name: Delete any existing etcd contents
file:
path: /var/lib/etcd/member
state: absent
- name: Reload systemd
command: systemctl daemon-reload
- name: Start and enable the etcd service
service:
name: etcd
state: started
enabled: true
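
The two architecture-specific copy tasks above differ only in the source directory (files/x86_64 vs files/arm), so they could be folded into one. A sketch; the etcd_src_dir variable is introduced here purely for illustration:

    - name: Copy the etcd binaries (single task for both architectures)
      vars:
        etcd_src_dir: "{{ 'arm' if ansible_architecture == 'armv7l' else 'x86_64' }}"
      copy:
        src: files/{{ etcd_src_dir }}/{{ item }}
        dest: /usr/local/bin/
        mode: 0755
      with_items:
        - etcd
        - etcdctl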

k8s-deploy/04-k8s-controller-deploy.yaml Normal file

@ -0,0 +1,56 @@
---
- name: Setting up the controller nodes
hosts: masters
vars:
etcd_host1_ip: 192.168.11.167
etcd_host2_ip: 192.168.11.94
kube_ver: 1.11.3
kube_arch: "{{ 'arm' if ansible_architecture == 'armv7l' else 'amd64' }}"
become: true
tasks:
- name: Provision the kubernetes Control Plane
file:
path: /etc/kubernetes/config
state: directory
- name: Download the kubernetes binaries
get_url:
url: https://storage.googleapis.com/kubernetes-release/release/v{{ kube_ver }}/bin/linux/{{ kube_arch }}/{{ item }}
dest: /usr/local/bin
mode: 0755
with_items:
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kubectl
- name: Configure the API server
template:
src: templates/kube-apiserver.service.j2
dest: /etc/systemd/system/kube-apiserver.service
- name: Configure the Controller Manager server
template:
src: templates/kube-controller-manager.service.j2
dest: /etc/systemd/system/kube-controller-manager.service
- name: Configure the Scheduler server
template:
src: templates/kube-scheduler.service.j2
dest: /etc/systemd/system/kube-scheduler.service
- name: Copy in the kube-scheduler config file
template:
src: templates/kube-scheduler.yaml.j2
dest: /etc/kubernetes/config/kube-scheduler.yaml
- name: Reload systemd
command: systemctl daemon-reload
- name: Start and enable the kubernetes services
service:
name: "{{ item }}"
state: started
enabled: true
with_items:
- "kube-apiserver"
- "kube-controller-manager"
- "kube-scheduler"

k8s-deploy/05-k8s-nginx-deploy.yaml Normal file

@ -0,0 +1,23 @@
---
- name: Playbook to automate a manual k8s installation
hosts: masters
become: true
tasks:
- name: Enable API server health checks
apt:
name: nginx
state: present
- name: Configure NGINX correctly
template:
src: templates/kubernetes.default.svc.cluster.local.j2
dest: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
- name: Activate the configuration
file:
src: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
path: /etc/nginx/sites-enabled/kubernetes.default.svc.cluster.local
state: link
- name: Start the NGINX service
service:
name: nginx
state: started
enabled: true
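
With NGINX proxying /healthz to the API server, the play could finish with a self-check. A sketch (not part of the commit) using the uri module and the Host header the server block expects:

    - name: Check the API server health endpoint through NGINX (sketch)
      uri:
        url: http://127.0.0.1/healthz
        headers:
          Host: kubernetes.default.svc.cluster.local
        return_content: yes
      changed_when: false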

k8s-deploy/06-k8s-workers-certs-deploy.yaml Normal file

@ -0,0 +1,59 @@
---
- name: Copy necessary files to worker nodes
hosts: workers
vars:
kubernetes_files:
- ca.pem
kubelet_files:
- kube-worker.kubeconfig
kube_proxy_files:
- kube-proxy.kubeconfig
workers:
- debian-k8s-node1
- debian-k8s-node2
become: true
tasks:
- name: Create the var lib kubernetes directory
file:
path: /var/lib/kubernetes
state: directory
- name: Create the var lib kubelet directory
file:
path: /var/lib/kubelet
state: directory
- name: Create the var lib kube-proxy directory
file:
path: /var/lib/kube-proxy
state: directory
- name: Copy the files to kubernetes directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /var/lib/kubernetes/{{ item }}
mode: preserve
with_items:
- "{{ kubernetes_files }}"
- name: Copy kubeconfig file to the kubelet directory
copy:
src: /var/tmp/kubernetes/{{ ansible_hostname }}.kubeconfig
dest: /var/lib/kubelet/kubeconfig
mode: preserve
- name: Copy worker node pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}.pem
dest: /var/lib/kubelet/{{ item }}.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy worker node key pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}-key.pem
dest: /var/lib/kubelet/{{ item }}-key.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy kube-proxy kubeconfig file to kube-proxy directory
copy:
src: /var/tmp/kubernetes/kube-proxy.kubeconfig
dest: /var/lib/kube-proxy/kubeconfig
mode: preserve

k8s-deploy/07-k8s-worker-deploy.yaml Normal file

@ -0,0 +1,183 @@
---
- name: Copy necessary files to worker nodes
hosts: workers
vars:
kubernetes_files:
- ca.pem
kubelet_files:
- kube-worker.kubeconfig
kube_proxy_files:
- kube-proxy.kubeconfig
workers:
- debian-k8s-node1
- debian-k8s-node2
become: true
tasks:
- name: Create the var lib kubernetes directory
file:
path: /var/lib/kubernetes
state: directory
- name: Create the var lib kubelet directory
file:
path: /var/lib/kubelet
state: directory
- name: Create the var lib kube-proxy directory
file:
path: /var/lib/kube-proxy
state: directory
- name: Copy the files to kubernetes directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /var/lib/kubernetes/{{ item }}
mode: preserve
with_items:
- "{{ kubernetes_files }}"
- name: Copy kubeconfig file to the kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}.kubeconfig
dest: /var/lib/kubelet/kubeconfig
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy worker node pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}.pem
dest: /var/lib/kubelet/{{ item }}.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy worker node key pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}-key.pem
dest: /var/lib/kubelet/{{ item }}-key.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy kube-proxy kubeconfig file to kube-proxy directory
copy:
src: /var/tmp/kubernetes/kube-proxy.kubeconfig
dest: /var/lib/kube-proxy/kubeconfig
mode: preserve
- name: Download and install the Kubernetes binaries
hosts: workers
become: true
tasks:
- name: Install dependencies
apt:
name: "{{ item }}"
state: present
with_items:
- "socat"
- "conntrack"
- "ipset"
- name: Download and install worker binaries
get_url:
url: "{{ item }}"
dest: /usr/local/bin
mode: 0755
with_items:
- "https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubectl"
- "https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kube-proxy"
- "https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubelet"
- "https://storage.googleapis.com/kubernetes-the-hard-way/runsc"
- name: Download utilities
get_url:
url: "{{ item }}"
dest: /var/tmp/
with_items:
- "https://github.com/kubernetes-incubator/cri-tools/releases/download/v1.11.1/crictl-v1.11.1-linux-amd64.tar.gz"
- "https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz"
- "https://github.com/containerd/containerd/releases/download/v1.2.0-beta.2/containerd-1.2.0-beta.2.linux-amd64.tar.gz"
- name: Download runc
get_url:
url: https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64
dest: /usr/local/bin/runc
mode: 0755
- name: Create installation directories
file:
path: "{{ item }}"
state: directory
with_items:
- "/etc/cni/net.d"
- "/opt/cni/bin"
- "/var/lib/kubelet"
- "/var/lib/kube-proxy"
- "/var/lib/kubernetes"
- "/var/run/kubernetes"
- "/etc/containerd"
- name: Extract crictl binary
unarchive:
remote_src: yes
src: /var/tmp/crictl-v1.11.1-linux-amd64.tar.gz
dest: /usr/local/bin
mode: 0755
- name: Extract cniplugins binaries
unarchive:
remote_src: yes
src: /var/tmp/cni-plugins-amd64-v0.7.1.tgz
dest: /opt/cni/bin
mode: 0755
- name: Extract containerd binaries
unarchive:
remote_src: yes
src: /var/tmp/containerd-1.2.0-beta.2.linux-amd64.tar.gz
dest: /
mode: 0755
- name: Create the CNI configuration
hosts: workers
become: true
vars:
pod_cidr: 10.200.0.0/24
cluster_cidr: 10.200.0.0/16
tasks:
- name: Create bridge.conf file
template:
src: templates/10_bridge.conf.j2
dest: /etc/cni/net.d/10_bridge.conf
- name: Create loopback file
copy:
src: files/99_loopback.conf
dest: /etc/cni/net.d/99_loopback.conf
- name: Create containerd configuration
copy:
src: files/config.toml
dest: /etc/containerd/config.toml
- name: Create containerd service file
copy:
src: files/containerd.service
dest: /etc/systemd/system/containerd.service
- name: Create kubelet-config.yaml file
template:
src: templates/kubelet-config.yaml.j2
dest: /var/lib/kubelet/kubelet-config.yaml
- name: Create the kubelet service file
copy:
src: files/kubelet.service
dest: /etc/systemd/system/kubelet.service
- name: Create the kube-proxy-config.yaml file
template:
src: templates/kube-proxy-config.yaml.j2
dest: /var/lib/kube-proxy/kube-proxy-config.yaml
- name: Create the kube-proxy.service file
copy:
src: files/kube-proxy.service
dest: /etc/systemd/system/kube-proxy.service
- name: Reload systemd configuration
command: systemctl daemon-reload
- name: Start and enable the worker daemons
service:
name: "{{ item }}"
state: started
enabled: true
with_items:
- "containerd"
- "kubelet"
- "kube-proxy"

k8s-deploy/08-rbac-clusterrole-create.yaml Normal file

@ -0,0 +1,17 @@
---
- name: Create the RBAC cluster role for kubelet authorization
hosts: debian-k8s-master1.ipa.champion
become: true
tasks:
- name: Copy the clusterrole and clusterrolebinding RBAC yaml files
copy:
src: files/{{ item }}
dest: /home/ansible/{{ item }}
with_items:
- clusterrole-api-to-kubelet.yaml
- clusterrolebinding-api-to-kubelet.yaml
- name: Apply the RBAC role to the cluster
command: kubectl apply -f {{ item }} --kubeconfig /home/ansible/admin.kubeconfig
with_items:
- clusterrole-api-to-kubelet.yaml
- clusterrolebinding-api-to-kubelet.yaml

k8s-deploy/09-k8s-create-remote-admin.yaml Normal file

@ -0,0 +1,12 @@
---
- name: Create an admin kubeconfig for remote administration
hosts: localhost
become: true
vars:
haproxy_addr: 192.168.11.58
kube_cluster: kubernetes
tasks:
- name: Create remote admin kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig=remote_admin.kubeconfig; kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=remote_admin.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=admin --kubeconfig=remote_admin.kubeconfig; kubectl config use-context default --kubeconfig=remote_admin.kubeconfig
args:
chdir: /var/tmp/kubernetes
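
The resulting remote_admin.kubeconfig can be smoke-tested straight away. A sketch of an extra task, assuming the HAProxy endpoint is reachable from this host:

    - name: Smoke-test the remote admin kubeconfig (sketch)
      command: kubectl get nodes --kubeconfig remote_admin.kubeconfig
      args:
        chdir: /var/tmp/kubernetes
      changed_when: false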

k8s-deploy/README.md Normal file

@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
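
For this repository the role is driven from localhost; a concrete invocation, mirroring k8s-deploy/tests/test.yml, would be:

    - hosts: localhost
      remote_user: root
      roles:
        - k8s-deploy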
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

k8s-deploy/core-dns.yaml Normal file

@ -0,0 +1,165 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: coredns/coredns:1.2.0
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: core-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP

k8s-deploy/defaults/main.yml Normal file

@ -0,0 +1,2 @@
---
# defaults file for k8s-deploy

k8s-deploy/files/99_loopback.conf Normal file

@ -0,0 +1,4 @@
{
"cniVersion": "0.3.1",
"type": "loopback"
}

k8s-deploy/files/X-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "system:node:NUMBER",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "system:nodes",
"OU": "Kubernetes - CentOS",
"ST": "Oregon"
}
]
}

k8s-deploy/files/admin-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:masters",
"OU": "Kubernetes via Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/arm/etcd Executable file

Binary file not shown.

k8s-deploy/files/arm/etcdctl Executable file

Binary file not shown.

k8s-deploy/files/ca-config.json Normal file

@ -0,0 +1,13 @@
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}

k8s-deploy/files/ca-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "Kubernetes",
"OU": "CA",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/clusterrole-api-to-kubelet.yaml Normal file

@ -0,0 +1,19 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"

k8s-deploy/files/clusterrolebinding-api-to-kubelet.yaml Normal file

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes

k8s-deploy/files/config.toml Normal file

@ -0,0 +1,11 @@
[plugins]
[plugins.cri.containerd]
snapshotter = "overlayfs"
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runc"
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runsc"
runtime_root = "/run/containerd/runsc"

k8s-deploy/files/containerd.service Normal file

@ -0,0 +1,19 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target

k8s-deploy/files/kube-controller-manager-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:kube-controller-manager",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/kube-proxy-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:node-proxier",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/kube-proxy.service Normal file

@ -0,0 +1,12 @@
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/files/kube-scheduler-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:kube-scheduler",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/kubelet.service Normal file

@ -0,0 +1,21 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \
--config=/var/lib/kubelet/kubelet-config.yaml \
--container-runtime=remote \
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \
--image-pull-progress-deadline=2m \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--network-plugin=cni \
--register-node=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/files/kubernetes-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "Kubernetes",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/service-account-csr.json Normal file

@ -0,0 +1,16 @@
{
"CN": "service-accounts",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "Kubernetes",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/files/x86_64/etcd Executable file

Binary file not shown.

k8s-deploy/files/x86_64/etcdctl Executable file

Binary file not shown.

k8s-deploy/handlers/main.yml Normal file

@ -0,0 +1,2 @@
---
# handlers file for k8s-deploy

k8s-deploy/k8s-deploy.yaml Normal file

@ -0,0 +1,332 @@
---
- name: Playbook to automate a manual k8s installation
hosts: localhost
vars:
worker_name:
- debian-k8s-node1
- debian-k8s-node2
server_name: "{{ item }}"
haproxy_addr: "192.168.11.58"
kube_cluster: "kubernetes"
become: true
tasks:
- name: Download and install the cfssl utility
get_url:
url: https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
dest: /usr/local/bin/cfssl
mode: 0755
- name: Download and install the cfssljson utility
get_url:
url: https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
dest: /usr/local/bin/cfssljson
mode: 0755
- name: Put the seed key material files in place
file:
path: /var/tmp/kubernetes
state: directory
- copy:
src: files/{{ item }}
dest: /var/tmp/kubernetes/
mode: preserve
with_items:
- ca-csr.json
- admin-csr.json
- ca-config.json
- kube-controller-manager-csr.json
- kube-proxy-csr.json
- kube-scheduler-csr.json
- kubernetes-csr.json
- service-account-csr.json
- template:
src: templates/worker-csr_json.j2
dest: /var/tmp/kubernetes/{{ item }}-csr.json
mode: preserve
with_items:
- "{{ worker_name }}"
- name: Create the CA
shell: /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
args:
chdir: /var/tmp/kubernetes
- name: Create the admin KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | /usr/local/bin/cfssljson -bare admin
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node certificates
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname={{ item }} -profile=kubernetes {{ item }}-csr.json | /usr/local/bin/cfssljson -bare {{ item }}
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-controller-manager KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | /usr/local/bin/cfssljson -bare kube-controller-manager
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-proxy KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | /usr/local/bin/cfssljson -bare kube-proxy
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | /usr/local/bin/cfssljson -bare kube-scheduler
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes cluster KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=10.32.0.1,10.240.0.10,10.240.0.11,"{{ haproxy_addr }}",127.0.0.1,kubernetes.default -profile=kubernetes kubernetes-csr.json | /usr/local/bin/cfssljson -bare kubernetes
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes service account KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-account-csr.json | /usr/local/bin/cfssljson -bare service-account
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node kubeconfig files
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig={{ item }}.kubeconfig; kubectl config set-credentials system:node:{{ item }} --client-certificate={{ item }}.pem --client-key={{ item }}-key.pem --embed-certs=true --kubeconfig={{ item }}.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:node:{{ item }} --kubeconfig={{ item }}.kubeconfig; kubectl config use-context default --kubeconfig={{ item }}.kubeconfig
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-proxy kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig=kube-proxy.kubeconfig; kubectl config set-credentials system:kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-proxy --kubeconfig=kube-proxy.kubeconfig; kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the controller-manager kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig; kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig; kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create admin kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=admin.kubeconfig; kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=admin.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=admin --kubeconfig=admin.kubeconfig; kubectl config use-context default --kubeconfig=admin.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create data encryption key
shell: head -c 32 /dev/urandom | base64
register: enc_key
- name: Generate the encryption file
template:
src: templates/encryption-config.j2
dest: /var/tmp/kubernetes/encryption-config.yaml
- name: Set the owner of files to be ansible
file:
path: /var/tmp/kubernetes
owner: jonny
recurse: true
- name: Copy necessary files to controllers
hosts: masters
vars:
kube_files:
- ca.pem
- ca-key.pem
- kubernetes-key.pem
- kubernetes.pem
- service-account.pem
- service-account-key.pem
- kube-controller-manager.kubeconfig
- kube-scheduler.kubeconfig
- encryption-config.yaml
etcd_files:
- ca.pem
- kubernetes-key.pem
- kubernetes.pem
become: true
tasks:
- name: Create etcd directories
file:
path: /etc/etcd
state: directory
- name: Create var lib kubernetes directory
file:
path: /var/lib/kubernetes
state: directory
- name: Copy files to kubernetes directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /var/lib/kubernetes/{{ item }}
mode: preserve
with_items:
- "{{ kube_files }}"
- name: Copy files to etcd directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /etc/etcd/{{ item }}
mode: preserve
with_items:
- "{{ etcd_files }}"
- name: Copy necessary files to worker nodes
hosts: workers
vars:
kubernetes_files:
- ca.pem
kubelet_files:
- kube-worker.kubeconfig
kube_proxy_files:
- kube-proxy.kubeconfig
workers:
- debian-k8s-node1
- debian-k8s-node2
become: true
tasks:
- name: Create the var lib kubernetes directory
file:
path: /var/lib/kubernetes
state: directory
- name: Create the var lib kubelet directory
file:
path: /var/lib/kubelet
state: directory
- name: Create the var lib kube-proxy directory
file:
path: /var/lib/kube-proxy
state: directory
- name: Copy the files to kubernetes directory
copy:
src: /var/tmp/kubernetes/{{ item }}
dest: /var/lib/kubernetes/{{ item }}
mode: preserve
with_items:
- "{{ kubernetes_files }}"
- name: Copy kubeconfig file to the kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}.kubeconfig
dest: /var/lib/kubelet/kubeconfig
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy worker node pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}.pem
dest: /var/lib/kubelet/{{ item }}.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy worker node key pem file to kubelet directory
copy:
src: /var/tmp/kubernetes/{{ item }}-key.pem
dest: /var/lib/kubelet/{{ item }}-key.pem
mode: preserve
with_items:
- "{{ workers }}"
- name: Copy kube-proxy kubeconfig file to kube-proxy directory
copy:
src: /var/tmp/kubernetes/kube-proxy.kubeconfig
dest: /var/lib/kube-proxy/kubeconfig
mode: preserve
######################################################
# Setting up etcd #
######################################################
- name: Setting up etcd on the controller nodes
hosts: masters
become: true
tasks:
- name: Copy the etcd binary
copy:
src: files/x86_64/{{ item }}
dest: /usr/local/bin/
mode: 0755
with_items:
- etcd
- etcdctl
when:
- ansible_architecture == "x86_64"
- name: Copy the etcd binary
copy:
src: files/arm/{{ item }}
dest: /usr/local/bin/
mode: 0755
with_items:
- etcd
- etcdctl
when:
- ansible_lsb.id == "Raspbian"
- name: Creating the etcd service file
template:
src: templates/etcd.service.j2
dest: /etc/systemd/system/etcd.service
- name: Start and enable the etcd service
service:
name: etcd
state: started
enabled: true
- name: Provision the kubernetes Control Plane
file:
path: /etc/kubernetes/config
state: directory
- name: Download the kubernetes binaries
get_url:
url: https://storage.googleapis.com/kubernetes-release/release/v1.10.6/bin/linux/amd64/{{ item }}
dest: /usr/local/bin
mode: 0755
with_items:
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kubectl
- name: Configure the API server
template:
src: templates/kube-apiserver.service.j2
dest: /etc/systemd/system/kube-apiserver.service
- name: Start and enable the API server service
service:
name: kube-apiserver
state: started
enabled: true
- name: Configure the Controller Manager server
template:
src: templates/kube-controller-manager.service.j2
dest: /etc/systemd/system/kube-controller-manager.service
- name: Start and enable the controller manager service
service:
name: kube-controller-manager
state: started
enabled: true
- name: Configure the Scheduler server
template:
src: templates/kube-scheduler.service.j2
dest: /etc/systemd/system/kube-scheduler.service
- name: Copy in the kube-scheduler config file
template:
src: templates/kube-scheduler.yaml.j2
dest: /etc/kubernetes/config/kube-scheduler.yaml
- name: Start and enable the scheduler service
service:
name: kube-scheduler
state: started
enabled: true
- name: Enable API server health checks
apt:
name: nginx
state: present
- name: Configure NGINX correctly
template:
src: templates/kubernetes.default.svc.cluster.local.j2
dest: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
- name: Activate the configuration
file:
src: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
path: /etc/nginx/sites-enabled/kubernetes.default.svc.cluster.local
state: link
- name: Start the NGINX service
service:
name: nginx
state: started
enabled: true

k8s-deploy/k8s-uninstall.yaml Normal file

@ -0,0 +1,119 @@
---
- name: Playbook to rollback an automated manual k8s installation
hosts: workers
become: true
tasks:
- name: Stop k8s services prior to deleting them
service:
name: "{{ item }}"
state: stopped
enabled: false
with_items:
- "kube-proxy"
- "containerd"
- "kubelet"
- name: Delete the binaries
file:
path: /usr/local/bin/{{ item }}
state: absent
with_items:
- kubectl
- kube-proxy
- kubelet
- runc
- runsc
- crictl
- name: Remove the dependencies
apt:
name: "{{ item }}"
state: absent
with_items:
- "socat"
- "ipset"
- "conntrack"
- name: Delete service files
file:
path: /etc/systemd/system/{{ item }}
state: absent
with_items:
- kubelet.service
- kube-proxy.service
- containerd.service
- name: Delete k8s directories and files
file:
path: "{{ item }}"
state: absent
with_items:
- "/var/lib/kube-proxy"
- "/var/lib/kubernetes"
- "/var/lib/kubelet"
- "/etc/containerd"
- "/etc/cni"
- "/etc/kubernetes"
- "/bin/containerd"
- "/bin/ctr"
- "/bin/containerd-shim-runc-v1"
- "/bin/containerd-shim"
- "/bin/containerd-release"
- "/bin/containerd-stress"
- "/opt/cni"
- name: Playbook to rollback an automated manual k8s installation
hosts: masters
become: true
tasks:
- name: Stop k8s services prior to deleting them
service:
name: "{{ item }}"
state: stopped
enabled: false
with_items:
- "kube-scheduler"
- "kube-controller-manager"
- "kube-apiserver"
- "etcd"
- name: Delete the binaries
file:
path: /usr/local/bin/{{ item }}
state: absent
with_items:
- kube-scheduler
- kube-controller-manager
- kube-apiserver
- etcd
- etcdctl
- name: Delete the systemd service files
file:
path: /etc/systemd/system/{{ item }}
state: absent
with_items:
- etcd.service
- kube-apiserver.service
- kube-controller-manager.service
- kube-scheduler.service
- name: Delete the config and TLS files
file:
path: "{{ item }}"
state: absent
with_items:
- "/var/lib/kubernetes"
- "/var/lib/kubelet"
- "/etc/etcd"
- "/etc/kubernetes"
- "/home/ansible/admin.kubeconfig"
- name: Remove the cryptographic files from this host
hosts: localhost
become: true
tasks:
- name: Remove kubernetes directory
file:
path: /var/tmp/kubernetes
state: absent
- name: Remove cfssl and cfssljson files
file:
path: /usr/local/bin/{{ item }}
state: absent
with_items:
- cfssl
- cfssljson

k8s-deploy/meta/main.yml Normal file

@ -0,0 +1,57 @@
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: license (GPLv2, CC-BY, etc)
min_ansible_version: 1.2
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If Travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
#github_branch:
#
# platforms is a list of platforms, and each platform has a name and a list of versions.
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

k8s-deploy/tasks/k8s-certs-create.yaml Normal file

@ -0,0 +1,128 @@
---
- name: Playbook to automate a manual k8s installation
hosts: localhost
vars:
worker_name:
- debian-k8s-node1
- debian-k8s-node2
server_name: "{{ item }}"
haproxy_addr: "192.168.11.58"
kube_cluster: "kubernetes"
become: true
tasks:
- name: Download and install the cfssl utility
get_url:
url: https://pkg.cfssl.org/R1.2/{{ item }}_linux-amd64
dest: /usr/local/bin/{{ item }}
mode: 0755
with_items:
- cfssl
- cfssljson
- name: Put the seed key material files in place
file:
path: /var/tmp/kubernetes
state: directory
- copy:
src: files/{{ item }}
dest: /var/tmp/kubernetes/
mode: preserve
with_items:
- ca-csr.json
- admin-csr.json
- ca-config.json
- kube-controller-manager-csr.json
- kube-proxy-csr.json
- kube-scheduler-csr.json
- kubernetes-csr.json
- service-account-csr.json
- template:
src: templates/worker-csr_json.j2
dest: /var/tmp/kubernetes/{{ item }}-csr.json
mode: preserve
with_items:
- "{{ worker_name }}"
- name: Create the CA
shell: /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
args:
chdir: /var/tmp/kubernetes
- name: Create the admin KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | /usr/local/bin/cfssljson -bare admin
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node certificates
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname={{ item }} -profile=kubernetes {{ item }}-csr.json | /usr/local/bin/cfssljson -bare {{ item }}
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-controller-manager KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | /usr/local/bin/cfssljson -bare kube-controller-manager
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-proxy KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | /usr/local/bin/cfssljson -bare kube-proxy
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | /usr/local/bin/cfssljson -bare kube-scheduler
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes cluster KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=10.32.0.1,10.240.0.10,10.240.0.11,"{{ haproxy_addr }}",127.0.0.1,kubernetes.default -profile=kubernetes kubernetes-csr.json | /usr/local/bin/cfssljson -bare kubernetes
args:
chdir: /var/tmp/kubernetes
- name: Create the kubernetes service account KMOs
shell: /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-account-csr.json | /usr/local/bin/cfssljson -bare service-account
args:
chdir: /var/tmp/kubernetes
- name: Create the worker node kubeconfig files
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig={{ item }}.kubeconfig; kubectl config set-credentials system:node:{{ item }} --client-certificate={{ item }}.pem --client-key={{ item }}-key.pem --embed-certs=true --kubeconfig={{ item }}.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:node:{{ item }} --kubeconfig={{ item }}.kubeconfig; kubectl config use-context default --kubeconfig={{ item }}.kubeconfig
args:
chdir: /var/tmp/kubernetes
with_items:
- "{{ worker_name }}"
- name: Create the kube-proxy kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://{{ haproxy_addr }}:6443 --kubeconfig=kube-proxy.kubeconfig; kubectl config set-credentials system:kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-proxy --kubeconfig=kube-proxy.kubeconfig; kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the controller-manager kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig; kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create the kube-scheduler kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig; kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create admin kubeconfig file
shell: kubectl config set-cluster {{ kube_cluster }} --certificate-authority=ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=admin.kubeconfig; kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=admin.kubeconfig; kubectl config set-context default --cluster={{ kube_cluster }} --user=admin --kubeconfig=admin.kubeconfig; kubectl config use-context default --kubeconfig=admin.kubeconfig
args:
chdir: /var/tmp/kubernetes
- name: Create data encryption key
shell: head -c 32 /dev/urandom | base64
register: enc_key
- name: Generate the encryption file
template:
src: templates/encryption-config.j2
dest: /var/tmp/kubernetes/encryption-config.yaml
- name: Set the owner of files to be ansible
file:
path: /var/tmp/kubernetes
owner: jonny
recurse: true

k8s-deploy/tasks/main.yml Normal file

@ -0,0 +1,2 @@
---
# tasks file for k8s-deploy

k8s-deploy/templates/10_bridge.conf.j2 Normal file

@ -0,0 +1,15 @@
{
"cniVersion": "0.3.1",
"name": "bridge",
"type": "bridge",
"bridge": "cnio0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"ranges": [
[{"subnet": "{{ pod_cidr }}"}]
],
"routes": [{"dst": "0.0.0.0/0"}]
}
}

k8s-deploy/templates/encryption-config.j2 Normal file

@ -0,0 +1,11 @@
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- aescbc:
keys:
- name: key1
secret: {{ enc_key.stdout }}
- identity: {}

k8s-deploy/templates/etcd.service-amd64.j2 Normal file

@ -0,0 +1,28 @@
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \
--name {{ ansible_hostname }} \
--cert-file=/etc/etcd/kubernetes.pem \
--key-file=/etc/etcd/kubernetes-key.pem \
--peer-cert-file=/etc/etcd/kubernetes.pem \
--peer-key-file=/etc/etcd/kubernetes-key.pem \
--trusted-ca-file=/etc/etcd/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ca.pem \
--peer-client-cert-auth \
--client-cert-auth \
--initial-advertise-peer-urls https://{{ ansible_default_ipv4.address }}:2380 \
--listen-peer-urls https://{{ ansible_default_ipv4.address }}:2380 \
--listen-client-urls https://{{ ansible_default_ipv4.address }}:2379,https://127.0.0.1:2379 \
--advertise-client-urls https://{{ ansible_default_ipv4.address }}:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster {{ etcd_host1 }}=https://{{ etcd_host1_ip }}:2380,{{ etcd_host2 }}=https://{{ etcd_host2_ip }}:2380 \
--initial-cluster-state new \
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/templates/etcd.service-arm.j2 Normal file

@ -0,0 +1,28 @@
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=ETCD_UNSUPPORTED_ARCH=arm /usr/local/bin/etcd \
--name {{ ansible_hostname }} \
--cert-file=/etc/etcd/kubernetes.pem \
--key-file=/etc/etcd/kubernetes-key.pem \
--peer-cert-file=/etc/etcd/kubernetes.pem \
--peer-key-file=/etc/etcd/kubernetes-key.pem \
--trusted-ca-file=/etc/etcd/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ca.pem \
--peer-client-cert-auth \
--client-cert-auth \
--initial-advertise-peer-urls https://{{ ansible_default_ipv4.address }}:2380 \
--listen-peer-urls https://{{ ansible_default_ipv4.address }}:2380 \
--listen-client-urls https://{{ ansible_default_ipv4.address }}:2379,https://127.0.0.1:2379 \
--advertise-client-urls https://{{ ansible_default_ipv4.address }}:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster {{ etcd_host1 }}=https://{{ etcd_host1_ip }}:2380,{{ etcd_host2 }}=https://{{ etcd_host2_ip }}:2380 \
--initial-cluster-state new \
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/templates/kube-apiserver.service.j2 Normal file

@ -0,0 +1,40 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--advertise-address={{ ansible_default_ipv4.address }} \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/audit.log \
--authorization-mode=Node,RBAC \
--bind-address=0.0.0.0 \
--client-ca-file=/var/lib/kubernetes/ca.pem \
--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--enable-swagger-ui=true \
--etcd-cafile=/var/lib/kubernetes/ca.pem \
--etcd-certfile=/var/lib/kubernetes/kubernetes.pem \
--etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \
--etcd-servers=https://{{ etcd_host1_ip }}:2379,https://{{ etcd_host2_ip }}:2379 \
--event-ttl=1h \
--experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \
--kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \
--kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \
--kubelet-https=true \
--runtime-config=api/all \
--service-account-key-file=/var/lib/kubernetes/service-account.pem \
--service-cluster-ip-range=10.32.0.0/24 \
--service-node-port-range=30000-32767 \
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/templates/kube-controller-manager.service.j2 Normal file

@ -0,0 +1,24 @@
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--address=0.0.0.0 \
--cluster-cidr=10.200.0.0/16 \
--allocate-node-cidrs=true \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \
--kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \
--leader-elect=true \
--root-ca-file=/var/lib/kubernetes/ca.pem \
--service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \
--service-cluster-ip-range=10.32.0.0/24 \
--use-service-account-credentials=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/templates/kube-proxy-config.yaml.j2 Normal file

@ -0,0 +1,6 @@
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
kubeconfig: "/var/lib/kube-proxy/kubeconfig"
mode: "iptables"
clusterCIDR: "{{ cluster_cidr }}"

k8s-deploy/templates/kube-scheduler.service.j2 Normal file

@ -0,0 +1,13 @@
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--config=/etc/kubernetes/config/kube-scheduler.yaml \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

k8s-deploy/templates/kube-scheduler.yaml.j2 Normal file

@ -0,0 +1,6 @@
apiVersion: componentconfig/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
leaderElect: true

k8s-deploy/templates/kubelet-config.yaml.j2 Normal file

@ -0,0 +1,18 @@
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
enabled: true
x509:
clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
- "10.32.0.10"
podCIDR: "{{ pod_cidr }}"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/{{ ansible_hostname }}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/{{ ansible_hostname }}-key.pem"

k8s-deploy/templates/kubernetes.default.svc.cluster.local.j2 Normal file

@ -0,0 +1,9 @@
server {
listen 80;
server_name kubernetes.default.svc.cluster.local;
location /healthz {
proxy_pass https://127.0.0.1:6443/healthz;
proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem;
}
}

k8s-deploy/templates/worker-csr_json.j2 Normal file

@ -0,0 +1,16 @@
{
"CN": "system:node:{{ server_name }}",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "HK",
"L": "Hong Kong",
"O": "system:nodes",
"OU": "Kubernetes - Ansible",
"ST": "Hong Kong"
}
]
}

k8s-deploy/test.yaml Normal file

@ -0,0 +1,12 @@
---
- name: test
hosts: localhost
become: true
tasks:
- name: Create data encryption key
shell: head -c 32 /dev/urandom | base64
register: enc_key
- name: Generate the encryption file
template:
src: templates/encryption-config.j2
dest: /var/tmp/kubernetes/encryption-config.yaml

k8s-deploy/tests/inventory Normal file

@ -0,0 +1,2 @@
localhost

k8s-deploy/tests/test.yml Normal file

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- k8s-deploy

k8s-deploy/vars/main.yml Normal file

@ -0,0 +1,2 @@
---
# vars file for k8s-deploy

rollback_k8s-deploy.yaml Normal file

@ -0,0 +1,17 @@
---
- name: Ansible playbook to roll-back any changes the deployment playbook makes
hosts: localhost
become: true
tasks:
- name: Uninstall the cfssl tool
file:
path: /usr/local/bin/cfssl
state: absent
- name: Uninstall the cfssljson tool
file:
path: /usr/local/bin/cfssljson
state: absent
- name: Delete the key material files and directory
file:
path: /var/tmp/kubernetes
state: absent