terraform-gcp-k8s/ansible/yum-config-manager.yaml

---
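# Playbook overview: prepares the CentOS hosts provisioned by the Terraform in
# this repo (yum repos, SELinux, CRI-O, kubelet/kubeadm/kubectl), then runs
# kubeadm init on the first master and kubeadm join on the workers.
# The gcp_* host groups are expected to be supplied by the GCE dynamic
# inventory (inventory/gce.py). A typical invocation, assumed here rather than
# documented in the repo, would look like:
#   ansible-playbook -i inventory/gce.py yum-config-manager.yaml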
- name: Install yum-utils
  hosts: all
  become: true
  tasks:
    - name: Install yum-utils
      yum:
        name: yum-utils
        state: present
      when: ansible_distribution == "CentOS"

- name: Add the kubernetes repo
  hosts: all
  become: true
  tasks:
    - name: Add kubernetes repo
      yum_repository:
        name: kubernetes
        description: Kubernetes
        baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
        gpgcheck: 1
        repo_gpgcheck: 1
        gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
      when: ansible_distribution == "CentOS"

- name: Set SELinux to permissive
  hosts: all
  become: true
  tasks:
    - name: Set SELinux to permissive
      selinux:
        policy: targeted
        state: permissive
      when: ansible_distribution == "CentOS"

- name: Install kubelet, kubeadm, kubectl
  hosts: all
  become: true
  tasks:
    - name: Install kubernetes binaries
      yum:
        name:
          - kubelet
          - kubeadm
          - kubectl
        state: present
      when: ansible_distribution == "CentOS"

- name: Start and enable the kubelet service
  hosts: all
  become: true
  tasks:
    - name: Start and enable the kubelet service
      service:
        name: kubelet
        enabled: yes
        state: started
      when: ansible_distribution == "CentOS"
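# The next two plays install CRI-O as the container runtime, using the CentOS
# PaaS SIG candidate repository referenced below, and patch its configuration
# so the CNI plugins installed under /opt/cni/bin are picked up.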
- name: Add the CRI-O repo
  hosts: all
  become: true
  tasks:
    - name: Add CRI-O repo
      yum_repository:
        name: crio
        description: CRI-O Repository
        baseurl: https://cbs.centos.org/repos/paas7-crio-114-candidate/x86_64/os/
        gpgcheck: 1
        gpgkey: https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-PaaS
      when: ansible_distribution == "CentOS"

- name: Install and enable cri-o
  hosts: all
  become: true
  tasks:
    - name: Install CRI-O binary
      yum:
        name: cri-o
        state: present
        disable_gpg_check: true
      when: ansible_distribution == "CentOS"
    - name: Add the CNI plugin directory to crio.conf
      lineinfile:
        path: /etc/crio/crio.conf
        insertafter: '"/usr/libexec/cni",'
        line: '"/opt/cni/bin"'
        state: present
    - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
      replace:
        path: /usr/libexec/crio/crio-wipe/lib.bash
        regexp: '\"\$1\"'
        replace: '$1'
    - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
      replace:
        path: /usr/libexec/crio/crio-wipe/lib.bash
        regexp: '\"\$2\"'
        replace: '$2'
    - name: Start and enable the cri-o service
      service:
        name: crio
        enabled: yes
        state: restarted
      when: ansible_distribution == "CentOS"
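# br_netfilter and overlay, together with the bridge-nf-call and ip_forward
# sysctls set in the following plays, are the standard kernel prerequisites
# for kubeadm and CRI-O based pod networking.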
- name: Load necessary kernel modules
  hosts: all
  become: true
  tasks:
    - name: Load br_netfilter and overlay kernel modules
      modprobe:
        name: "{{ item }}"
        state: present
      with_items:
        - "br_netfilter"
        - "overlay"
      when: ansible_distribution == "CentOS"

- name: Set the sysctl values for networking
  hosts: all
  become: true
  tasks:
    - name: Set the iptables bridge parameter
      sysctl:
        name: net.bridge.bridge-nf-call-iptables
        value: 1
        sysctl_set: yes
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf
    - name: Set the ip_forward parameter
      sysctl:
        name: net.ipv4.ip_forward
        value: 1
        sysctl_set: yes
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf
    - name: Set the IPv6 iptables bridge parameter
      sysctl:
        name: net.bridge.bridge-nf-call-ip6tables
        value: 1
        sysctl_set: yes
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf
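# The next two plays open the host-level firewalld ports; the GCP VPC firewall
# rules appear to be handled separately by the firewall.tf files in the
# k8s-master/ and k8s-workers/ Terraform modules.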
- name: Create the master node firewall rules
  hosts: gcp_k8s_role_master
  become: true
  tasks:
    - name: Open the firewalld ports for the API server, etcd, CRI-O metrics, kubelet API, scheduler, and controller-manager
      firewalld:
        port: "{{ item }}"
        state: enabled
        permanent: true
      with_items:
        - "6443/tcp"
        - "2379-2380/tcp"
        - "9537/tcp"
        - "10250/tcp"
        - "10251/tcp"
        - "10252/tcp"

- name: Create the worker node firewall rules
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    - name: Open the firewalld ports for HTTP/HTTPS ingress, the kubelet API, and the NodePort service range
      firewalld:
        port: "{{ item }}"
        state: enabled
        permanent: true
      with_items:
        - "80/tcp"
        - "443/tcp"
        - "10250/tcp"
        - "30000-32767/tcp"
- name: Create the kubernetes cluster using kubeadm
  hosts: gcp_first_master_true
  become: true
  tasks:
    - name: Check for admin.conf from kubeadm
      stat:
        path: /etc/kubernetes/admin.conf
      register: admin_conf
    - set_fact:
        running: true
      when: admin_conf.stat.exists == false
    - debug:
        var: running
      when: admin_conf.stat.exists == false
    - name: Run kubeadm if admin.conf doesn't exist
      # Add --pod-network-cidr 10.244.0.0/16 here if using flannel networking
      command: kubeadm init --ignore-preflight-errors all --cri-socket /run/crio/crio.sock
      async: 180
      poll: 0
      register: kubeadm_running
      when: admin_conf.stat.exists == false
    - name: Wait for the kubelet config file to be created
      wait_for:
        path: /var/lib/kubelet/config.yaml
      when: admin_conf.stat.exists == false
    - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
      replace:
        path: /var/lib/kubelet/config.yaml
        regexp: "cgroupDriver: cgroupfs"
        replace: "cgroupDriver: systemd"
      when: admin_conf.stat.exists == false
    - name: Restart kubelet service
      service:
        name: kubelet
        state: restarted
    - name: Check that kubeadm has completed
      async_status:
        jid: "{{ kubeadm_running.ansible_job_id }}"
      register: job_result
      until: job_result.finished
      retries: 30
      when: admin_conf.stat.exists == false
    - name: Create kubeadm join command
      shell: kubeadm token create --print-join-command
      register: results
      when: admin_conf.stat.exists == false
    - debug:
        var: results.stdout
      when: admin_conf.stat.exists == false
    - set_fact:
        token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){4}([^\s]+)'
      when: admin_conf.stat.exists == false
    - debug:
        var: token
      when: admin_conf.stat.exists == false
    - set_fact:
        hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
      vars:
        regexp: '--discovery-token-ca-cert-hash ([^\s]+)'
      when: admin_conf.stat.exists == false
    - debug:
        var: hash
      when: admin_conf.stat.exists == false
    - name: Install weave networking for x86_64
      shell: curl -sSL "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
      when:
        - admin_conf.stat.exists == false
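# Worker join: reads the token/hash facts set on the first master via
# hostvars, then applies the same systemd cgroup driver fix to the worker
# kubelets before restarting them.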
- name: Set up worker nodes
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    - name: Install kubernetes on nodes
      command: kubeadm join "{{ hostvars[item]['ansible_eth0']['ipv4']['address'] }}":6443 --token "{{ hostvars[item]['token'] }}" --discovery-token-ca-cert-hash "{{ hostvars[item]['hash'] }}"
      when: hostvars[item]['running'] | default(false)
      with_items: "{{ groups['gcp_first_master_true'] }}"
    - name: Wait for the kubelet config file to be created
      wait_for:
        path: /var/lib/kubelet/config.yaml
    - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
      replace:
        path: /var/lib/kubelet/config.yaml
        regexp: "cgroupDriver: cgroupfs"
        replace: "cgroupDriver: systemd"
      register: kube_updated
    - name: Restart kubelet service
      service:
        name: kubelet
        state: restarted
      when: kube_updated.changed == true