new file: ansible/inv-gcp.yml

new file:   ansible/inventory/gce.ini
	new file:   ansible/inventory/gce.py
	new file:   ansible/master-node-create.yaml
	new file:   ansible/secrets.py
	new file:   ansible/test-inv
	new file:   ansible/test.yaml
	new file:   ansible/work-kube-config.yaml
	new file:   ansible/worker-config.yaml
	new file:   ansible/yum-config-manager.yaml
	new file:   gcp-lb/main.tf
	new file:   gcp-lb/outputs.tf
	new file:   gcp-lb/provider.tf
	new file:   gcp-lb/variables.tf
	new file:   k8s-master/firewall.tf
	new file:   k8s-master/main.tf
	new file:   k8s-master/network.tf
	new file:   k8s-master/outputs.tf
	new file:   k8s-master/provider.tf
	new file:   k8s-master/scripts/get-metadata-gce.sh
	new file:   k8s-master/scripts/id_ecdsa
	new file:   k8s-master/scripts/id_ecdsa.pub
	new file:   k8s-master/scripts/startup.sh
	new file:   k8s-master/variables.tf
	new file:   k8s-workers/firewall.tf
	new file:   k8s-workers/main.tf
	new file:   k8s-workers/outputs.tf
	new file:   k8s-workers/provider.tf
	new file:   k8s-workers/scripts/get-metadata-gce.sh
	new file:   k8s-workers/scripts/id_ecdsa
	new file:   k8s-workers/scripts/id_ecdsa.pub
	new file:   k8s-workers/scripts/startup.sh
	new file:   k8s-workers/variables.tf
	new file:   main.tf
	new file:   provider.tf
	new file:   variables.tf
	new file:   versions.tf
Initial commit
This commit is contained in:
Jonny Ervine 2020-02-19 08:24:39 +00:00
parent dcadf41840
commit 88b85b2c84
37 changed files with 1882 additions and 0 deletions

25
ansible/inv-gcp.yml Normal file
View File

@ -0,0 +1,25 @@
# Ansible gcp_compute dynamic-inventory plugin configuration.
# Builds inventory from GCE instances in the listed zones/projects,
# grouping hosts by GCE label and connecting via their public NAT IP.
plugin: gcp_compute
zones: # populate inventory with instances in these regions
  - us-central1-a
projects:
  - centos-k8s
# NOTE(review): 'filters' is left empty (parses as null) with the example
# filter commented out — presumably the plugin treats that as "no
# filtering"; confirm, or drop the key entirely.
filters:
# - scheduling.automaticRestart = true AND machineType = n1-standard-1
# Service-account JSON key used to authenticate against the GCE API.
service_account_file: /home/jonny/terraform/gcp-k8s/centos-k8s-d9557c7f6db3.json
auth_kind: serviceaccount
scopes:
  - 'https://www.googleapis.com/auth/cloud-platform'
  - 'https://www.googleapis.com/auth/compute.readonly'
keyed_groups:
  # Create groups from GCE labels
  - prefix: gcp
    key: labels
hostnames:
  # List host by name instead of the default public ip
  - name
compose:
  # Set an inventory parameter to use the Public IP address to connect to the host
  # For Private ip use "networkInterfaces[0].networkIP"
  ansible_host: networkInterfaces[0].accessConfigs[0].natIP

76
ansible/inventory/gce.ini Normal file
View File

@ -0,0 +1,76 @@
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# The GCE inventory script has the following dependencies:
# 1. A valid Google Cloud Platform account with Google Compute Engine
# enabled. See https://cloud.google.com
# 2. An OAuth2 Service Account flow should be enabled. This will generate
# a private key file that the inventory script will use for API request
# authorization. See https://developers.google.com/accounts/docs/OAuth2
# 3. Convert the private key from PKCS12 to PEM format
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
# > -nodes -nocerts | openssl rsa -out pkey.pem
# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
#
# (See ansible/test/gce_tests.py comments for full install instructions)
#
# Author: Eric Johnson <erjohnso@google.com>
# Contributors: John Roach <johnroach1985@gmail.com>
[gce]
# GCE Service Account configuration information can be stored in the
# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
# exist in your PYTHONPATH and be picked up automatically with an import
# statement in the inventory script. However, you can specify an absolute
# path to the secrets.py file with 'libcloud_secrets' parameter.
# This option will be deprecated in a future release.
libcloud_secrets =
# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
# You can add multiple gce projects to by using a comma separated list. Make
# sure that the service account used has permissions on said projects.
gce_service_account_email_address =
gce_service_account_pem_file_path =
gce_project_id =
gce_zone =
# Filter inventory based on state. Leave undefined to return instances regardless of state.
# example: Uncomment to only return inventory in the running or provisioning state
#instance_states = RUNNING,PROVISIONING
# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
# example: Uncomment to only return inventory with the http-server or https-server tag
#instance_tags = http-server,https-server
[inventory]
# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
# contain the instance internal or external address. Values may be either
# 'internal' or 'external'. If 'external' is specified but no external instance
# address exists, the internal address will be used.
# The INVENTORY_IP_TYPE environment variable will override this value.
inventory_ip_type =
[cache]
# directory in which cache should be created
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300

521
ansible/inventory/gce.py Executable file
View File

@ -0,0 +1,521 @@
#!/usr/bin/env python
# Copyright: (c) 2013, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
John Roach <johnroach1985@gmail.com>
Version: 0.0.4
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
from ansible.module_utils.six.moves import configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
import json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except Exception:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    '''File-backed JSON cache for cloud inventory data with age-based expiry.

    The cache lives at <cache_path>/<cache_name>; validity is judged purely
    by the file's mtime against cache_max_age (seconds).
    '''

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        cache_dir = os.path.expanduser(cache_path)
        if not os.path.exists(cache_dir):
            try:
                os.makedirs(cache_dir)
            except OSError:
                # Another process may create the directory between the
                # exists() check and makedirs() (check-then-act race when
                # several inventory runs start concurrently); only
                # propagate if the directory still does not exist.
                if not os.path.isdir(cache_dir):
                    raise
        self.cache_path_cache = os.path.join(cache_dir, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''
        if max_age is None:
            max_age = self.cache_max_age
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + max_age) > current_time:
                return True
        return False

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
        data = ''
        if not filename:
            filename = self.cache_path_cache
        with open(filename, 'r') as cache:
            data = cache.read()
        return json.loads(data)

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''
        if not filename:
            filename = self.cache_path_cache
        json_data = json.dumps(data)
        with open(filename, 'w') as cache:
            cache.write(json_data)
        return True
class GceInventory(object):
    '''Builds an Ansible dynamic inventory from GCE instances via libcloud.

    All of the work happens in the constructor: it parses CLI arguments,
    reads gce.ini / environment configuration, fetches the instance list
    (or loads it from the on-disk cache), prints the JSON inventory to
    stdout and exits the process.
    '''

    def __init__(self):
        # Cache object
        self.cache = None
        # dictionary containing inventory read from disk
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.drivers = self.get_gce_drivers()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Cache management
        start_inventory_time = time()
        cache_used = False
        if self.args.refresh_cache or not self.cache.is_valid():
            self.do_api_calls_update_cache()
        else:
            self.load_inventory_from_cache()
            cache_used = True
        # Single assignment: the original wrote a {'use_cache': True} dict
        # here and immediately overwrote it, silently losing that key.
        self.inventory['_meta']['stats'] = {
            'inventory_load_time': time() - start_inventory_time,
            'cache_used': cache_used
        }

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(
                self.inventory['_meta']['hostvars'][self.args.host],
                pretty=self.args.pretty))
        else:
            # Otherwise, assume user wants all instances grouped.
            # (A dead `zones = self.parse_env_zones()` call was removed
            # here; zone filtering happens in do_api_calls_update_cache.)
            print(self.json_format_dict(self.inventory,
                                        pretty=self.args.pretty))
        sys.exit(0)

    def get_config(self):
        """
        Reads the settings from the gce.ini file.

        Populates a ConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = configparser.ConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'gce_zone': '',
            'libcloud_secrets': '',
            'instance_tags': '',
            'inventory_ip_type': '',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': '300'
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')
        config.read(gce_ini_path)

        #########
        # Section added for processing ini settings
        #########

        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')

        # Set the instance_tags filter; env var overrides config from file
        # and cli param overrides all.
        if self.args.instance_tags:
            self.instance_tags = self.args.instance_tags
        else:
            self.instance_tags = os.environ.get(
                'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
        if self.instance_tags:
            self.instance_tags = self.instance_tags.split(',')

        # Caching
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TODO(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config

    def get_inventory_options(self):
        """Determine inventory options. Environment variables always
        take precedence over configuration files."""
        ip_type = self.config.get('inventory', 'inventory_ip_type')
        # If the appropriate environment variables are set, they override
        # other configuration
        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
        return ip_type

    def get_gce_drivers(self):
        """Determine the GCE authorization settings and return a list of
        libcloud drivers, one per configured project.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False

        try:
            import secrets
            args = list(secrets.GCE_PARAMS)
            kwargs = secrets.GCE_KEYWORD_PARAMS
            secrets_found = True
        except Exception:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                sys.exit(err)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except Exception:
                pass

        if not secrets_found:
            args = [
                self.config.get('gce', 'gce_service_account_email_address'),
                self.config.get('gce', 'gce_service_account_pem_file_path')
            ]
            kwargs = {'project': self.config.get('gce', 'gce_project_id'),
                      'datacenter': self.config.get('gce', 'gce_zone')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
        kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])

        # One driver per comma-separated project id.
        gce_drivers = []
        projects = kwargs['project'].split(',')
        for project in projects:
            kwargs['project'] = project
            gce = get_driver(Provider.GCE)(*args, **kwargs)
            gce.connection.user_agent_append(
                '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
            )
            gce_drivers.append(gce)
        return gce_drivers

    def parse_env_zones(self):
        '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
        If provided, this will be used to filter the results of the grouped_instances call'''
        import csv
        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
        zones = [r for r in reader]
        return [z for z in zones[0]]

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--instance-tags', action='store',
                            help='Only include instances with this tags, separated by comma')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty format (default: False)')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            help='Force refresh of cache by making API requests (default: False - use cache files)')
        self.args = parser.parse_args()

    def node_to_dict(self, inst):
        '''Flatten a libcloud Node into the gce_* hostvars dictionary.'''
        md = {}

        if inst is None:
            return {}

        if 'items' in inst.extra['metadata']:
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']

        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        subnet = None
        if 'subnetwork' in inst.extra['networkInterfaces'][0]:
            subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
        if self.ip_type == 'internal':
            ssh_host = inst.private_ips[0]
        else:
            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            'gce_subnetwork': subnet,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': ssh_host
        }

    def load_inventory_from_cache(self):
        ''' Loads inventory from JSON on disk. '''
        try:
            self.inventory = self.cache.get_all_data_from_cache()
            hosts = self.inventory['_meta']['hostvars']
        except Exception:
            # Flag name fixed: the actual CLI option is --refresh-cache.
            print(
                "Invalid inventory file %s. Please rebuild with --refresh-cache option."
                % (self.cache.cache_path_cache))
            raise

    def do_api_calls_update_cache(self):
        ''' Do API calls and save data in cache. '''
        zones = self.parse_env_zones()
        data = self.group_instances(zones)
        self.cache.write_to_cache(data)
        self.inventory = data

    def list_nodes(self):
        '''List nodes across all configured drivers.

        NOTE(review): the pagination params dict is shared across drivers
        and `more_results` reflects only the last driver's pageToken —
        presumably acceptable for small inventories; verify with multiple
        large projects.
        '''
        all_nodes = []
        params, more_results = {'maxResults': 500}, True
        while more_results:
            for driver in self.drivers:
                driver.connection.gce_params = params
                all_nodes.extend(driver.list_nodes())
                more_results = 'pageToken' in params
        return all_nodes

    def group_instances(self, zones=None):
        '''Group all instances by zone, tag, network, machine type, image,
        status and IP address, plus the _meta/hostvars block.'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}

        for node in self.list_nodes():

            # This check filters on the desired instance states defined in the
            # config file with the instance_states config option.
            #
            # If the instance_states list is _empty_ then _ALL_ states are returned.
            #
            # If the instance_states list is _populated_ then check the current
            # state against the instance_states list
            if self.instance_states and not node.extra['status'] in self.instance_states:
                continue

            # This check filters on the desired instance tags defined in the
            # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
            # or as the cli param --instance-tags.
            #
            # If the instance_tags list is _empty_ then _ALL_ instances are returned.
            #
            # If the instance_tags list is _populated_ then check the current
            # instance tags against the instance_tags list. If the instance has
            # at least one tag from the instance_tags list, it is returned.
            if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
                continue

            name = node.name

            meta["hostvars"][name] = self.node_to_dict(node)

            zone = node.extra['zone'].name

            # To avoid making multiple requests per zone
            # we list all nodes and then filter the results
            if zones and zone not in zones:
                continue

            if zone in groups:
                groups[zone].append(name)
            else:
                groups[zone] = [name]

            tags = node.extra['tags']
            for t in tags:
                if t.startswith('group-'):
                    tag = t[6:]
                else:
                    tag = 'tag_%s' % t
                if tag in groups:
                    groups[tag].append(name)
                else:
                    groups[tag] = [name]

            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if net in groups:
                groups[net].append(name)
            else:
                groups[net] = [name]

            machine_type = node.size
            if machine_type in groups:
                groups[machine_type].append(name)
            else:
                groups[machine_type] = [name]

            image = node.image or 'persistent_disk'
            if image in groups:
                groups[image].append(name)
            else:
                groups[image] = [name]

            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if stat in groups:
                groups[stat].append(name)
            else:
                groups[stat] = [name]

            # Each IP address becomes a single-host group.
            for private_ip in node.private_ips:
                groups[private_ip] = [name]

            if len(node.public_ips) >= 1:
                for public_ip in node.public_ips:
                    groups[public_ip] = [name]

        groups["_meta"] = meta

        return groups

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script. Constructing GceInventory does all the work: it parses
# the CLI arguments, builds or loads the inventory, prints the JSON to
# stdout and calls sys.exit() itself.
if __name__ == '__main__':
    GceInventory()

View File

@ -0,0 +1,55 @@
---
# Bootstraps the first master with `kubeadm init`, extracts the join token
# and CA-cert hash from the join command, and installs flannel networking.
# Idempotent: every bootstrap task is skipped once /etc/kubernetes/admin.conf
# exists (i.e. kubeadm init has already run on this host).
- name: Set up master node
  hosts: masters
  become: true
  tasks:
    - name: Check for admin.conf from kubeadm
      stat:
        path: /etc/kubernetes/admin.conf
      register: admin_conf

    # Jinja braces added: the original stored the literal string
    # "admin_conf.stat.exists" instead of the boolean.
    - set_fact:
        running: "{{ admin_conf.stat.exists }}"

    - name: Run kubeadm if admin.conf doesn't exist
      command: kubeadm init
      when: not admin_conf.stat.exists

    - name: Create kubeadm join command
      shell: kubeadm token create --print-join-command
      register: results
      when: not admin_conf.stat.exists

    - debug:
        var: results.stdout
      when: not admin_conf.stat.exists

    # Token is the 5th whitespace-separated field of the join command.
    - set_fact:
        token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){4}([^\s]+)'
      when: not admin_conf.stat.exists

    - debug:
        var: token
      when: not admin_conf.stat.exists

    # CA-cert hash is the 7th whitespace-separated field.
    - set_fact:
        hash: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){6}([^\s]+)'
      when: not admin_conf.stat.exists

    - debug:
        var: hash
      when: not admin_conf.stat.exists

    # rawgit.com was shut down in October 2019; raw.githubusercontent.com
    # serves the same file.
    - name: Install flannel networking for RPi
      shell: curl -sSL https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml | sed "s/amd64/arm/g" | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
      when:
        - ansible_lsb.id == "Raspbian"
        - not admin_conf.stat.exists

    - name: Install flannel networking for x86_64
      shell: curl -sSL https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
      when:
        - ansible_distribution == "Debian"
        - ansible_architecture == "x86_64"
        - not admin_conf.stat.exists

    - debug:
        msg: "kubeadm has probably already been run."
      when: admin_conf.stat.exists

2
ansible/secrets.py Normal file
View File

@ -0,0 +1,2 @@
# libcloud GCE credentials consumed by inventory/gce.py via `import secrets`.
# GCE_PARAMS = (service-account email, path to the key file on disk).
# NOTE(review): this commits an account identity and a key-file path to
# version control — presumably the JSON key itself lives only at that path;
# verify the key file is not committed alongside this repo.
GCE_PARAMS = ('1098936731058-compute@developer.gserviceaccount.com', '/home/jonny/terraform/gcp-k8s/centos-k8s-d9557c7f6db3.json')
# Keyword arguments passed to the libcloud GCE driver.
GCE_KEYWORD_PARAMS = {'project': 'centos-k8s', 'datacenter': 'us-central1'}

2
ansible/test-inv Normal file
View File

@ -0,0 +1,2 @@
[master]
192.168.11.11

34
ansible/test.yaml Normal file
View File

@ -0,0 +1,34 @@
---
# Ad-hoc test playbook: on an already-initialised master, extracts the join
# token and CA-cert hash from `kubeadm token create --print-join-command`
# output. All tasks run only when /etc/kubernetes/admin.conf exists.
- name: Create the kubernetes cluster using kubeadm
  hosts: masters
  become: true
  tasks:
    - name: Check for admin.conf from kubeadm
      stat:
        path: /etc/kubernetes/admin.conf
      register: admin_conf

    # Jinja braces added: the original stored the literal string
    # "admin_conf.stat.exists" instead of the boolean.
    - set_fact:
        running: "{{ admin_conf.stat.exists }}"

    - name: Create kubeadm join command
      shell: kubeadm token create --print-join-command
      register: results
      when: admin_conf.stat.exists

    - debug:
        var: results.stdout
      when: admin_conf.stat.exists

    # Token is the 5th whitespace-separated field of the join command.
    - set_fact:
        token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){4}([^\s]+)'
      when: admin_conf.stat.exists

    - debug:
        var: token
      when: admin_conf.stat.exists

    # Hash captured as group 1 (note: includes the trailing whitespace
    # matched by the original pattern — behavior preserved).
    - set_fact:
        hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
      vars:
        regexp: '--discovery-token-ca-cert-hash ([^\s]+\s)'
      when: admin_conf.stat.exists

    - debug:
        var: hash
      when: admin_conf.stat.exists

View File

@ -0,0 +1,21 @@
---
# Patches the kubelet config on worker nodes to use the systemd cgroup
# driver and restarts kubelet only when the file actually changed.
- name: Set up worker nodes
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    - name: Wait for the kubelet config file to be created
      wait_for:
        path: /var/lib/kubelet/config.yaml

    - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
      replace:
        path: /var/lib/kubelet/config.yaml
        regexp: "cgroupDriver: cgroupfs"
        replace: "cgroupDriver: systemd"
      register: kube_updated

    - debug:
        var: kube_updated

    - name: Restart kubelet service
      service:
        name: kubelet
        state: restarted
      # `is changed` test instead of comparing .changed == true
      when: kube_updated is changed

View File

@ -0,0 +1,36 @@
---
# Extracts a fresh join token and CA-cert hash on the first master, then
# joins every worker to that master with `kubeadm join`.
- name: Create the kubernetes cluster using kubeadm
  hosts: gcp_first_master_true
  become: true
  tasks:
    - name: Create kubeadm join command
      shell: kubeadm token create --print-join-command
      register: results

    - debug:
        var: results.stdout

    - set_fact:
        running: true

    # Token is the 5th whitespace-separated field of the join command.
    - set_fact:
        token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){4}([^\s]+)'

    - debug:
        var: token

    - set_fact:
        hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
      vars:
        regexp: '--discovery-token-ca-cert-hash ([^\s]+)'

    - debug:
        var: hash

- name: Set up worker nodes
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    - name: Install kubernetes on nodes
      command: kubeadm join "{{ hostvars[item]['ansible_eth0']['ipv4']['address'] }}":6443 --token "{{ hostvars[item]['token'] }}" --discovery-token-ca-cert-hash "{{ hostvars[item]['hash'] }}"
      when: hostvars[item]['running']
      with_items: "{{ groups['gcp_first_master_true'] }}"
      register: join_output

    # A loop-registered variable exposes per-item output under .results,
    # not .stdout — the original `var: join_output.stdout` was undefined.
    - debug:
        var: join_output

View File

@ -0,0 +1,278 @@
---
# End-to-end CentOS Kubernetes bootstrap: package repos, SELinux, kubelet,
# CRI-O runtime, kernel/sysctl prerequisites and firewalld rules, then
# `kubeadm init` on the first master and `kubeadm join` on the workers.
- name: Install yum-utils
  hosts: all
  become: true
  tasks:
    - name: Install yum-utils
      yum:
        name: yum-utils
        state: present
      when: ansible_distribution == "CentOS"

- name: Add the kubernetes repo
  hosts: all
  become: true
  tasks:
    - name: Add kubernetes repo
      yum_repository:
        name: kubernetes
        description: Kubernetes
        baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
        gpgcheck: 1
        repo_gpgcheck: 1
        gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
      when: ansible_distribution == "CentOS"

- name: Set SELinux to permissive
  hosts: all
  become: true
  tasks:
    - name: Set SELinux to permissive
      selinux:
        policy: targeted
        state: permissive
      when: ansible_distribution == "CentOS"

- name: Install kubelet, kubeadm, kubectl
  hosts: all
  become: true
  tasks:
    - name: Install kubernetes binaries
      yum:
        name:
          - kubelet
          - kubeadm
          - kubectl
        state: present
      when: ansible_distribution == "CentOS"

- name: Start and enable the kubelet service
  hosts: all
  become: true
  tasks:
    - name: Start and enable the kubelet service
      service:
        name: kubelet
        enabled: true
        state: started
      when: ansible_distribution == "CentOS"

- name: Add the CRI-O repo
  hosts: all
  become: true
  tasks:
    - name: Add kubernetes repo
      yum_repository:
        name: crio
        description: CRI-O Repository
        baseurl: https://cbs.centos.org/repos/paas7-crio-114-candidate/x86_64/os/
        gpgcheck: 1
        gpgkey: https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-PaaS
      when: ansible_distribution == "CentOS"

- name: Install and enable cri-o
  hosts: all
  become: true
  tasks:
    - name: Install CRI-O binary
      yum:
        name: cri-o
        state: present
        disable_gpg_check: true
      when: ansible_distribution == "CentOS"

    - name: Add the CNI plugin directory to crio.conf
      lineinfile:
        path: /etc/crio/crio.conf
        insertafter: '"/usr/libexec/cni",'
        line: '"/opt/cni/bin"'
        state: present

    - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
      replace:
        path: /usr/libexec/crio/crio-wipe/lib.bash
        regexp: '\"\$1\"'
        replace: '$1'

    - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
      replace:
        path: /usr/libexec/crio/crio-wipe/lib.bash
        regexp: '\"\$2\"'
        replace: '$2'

    - name: Start and enable the cri-o service
      service:
        name: crio
        enabled: true
        state: restarted
      when: ansible_distribution == "CentOS"

- name: Load necessary kernel modules
  hosts: all
  become: true
  tasks:
    - name: Load br_netfilter and overlay kernel modules
      modprobe:
        name: "{{ item }}"
        state: present
      with_items:
        - "br_netfilter"
        - "overlay"
      when: ansible_distribution == "CentOS"

- name: Set the sysctl values for networking
  hosts: all
  become: true
  tasks:
    - name: Set the iptables bridge parameter
      sysctl:
        name: net.bridge.bridge-nf-call-iptables
        value: "1"
        sysctl_set: true
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf

    - name: Set the ip_forward parameter
      sysctl:
        name: net.ipv4.ip_forward
        value: "1"
        sysctl_set: true
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf

    - name: Set the IPv6 iptables bridge parameter
      sysctl:
        name: net.bridge.bridge-nf-call-ip6tables
        value: "1"
        sysctl_set: true
        state: present
        sysctl_file: /etc/sysctl.d/99-k8s.conf

- name: Create the server side firewall rules
  hosts: gcp_k8s_role_master
  become: true
  tasks:
    - name: Create the firewalld rule for the API, etcd, kubelet API, scheduler, and controller-manager services for k8s
      firewalld:
        port: "{{ item }}"
        state: enabled
        permanent: true
      with_items:
        - "6443/tcp"
        - "2379-2380/tcp"
        - "9537/tcp"
        - "10250/tcp"
        - "10251/tcp"
        - "10252/tcp"

- name: Create the server side firewall rules
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    - name: Create the firewalld rule for the API, etcd, kubelet API, scheduler, and controller-manager services for k8s
      firewalld:
        port: "{{ item }}"
        state: enabled
        permanent: true
      with_items:
        - "80/tcp"
        - "443/tcp"
        - "10250/tcp"
        - "30000-32767/tcp"

- name: Create the kubernetes cluster using kubeadm
  hosts: gcp_first_master_true
  become: true
  tasks:
    - name: Check for admin.conf from kubeadm
      stat:
        path: /etc/kubernetes/admin.conf
      register: admin_conf

    - set_fact:
        running: true
      when: not admin_conf.stat.exists

    - debug:
        var: running
      when: not admin_conf.stat.exists

    # Comment moved out of the command scalar: a ` #` inside a plain YAML
    # scalar starts a comment, so the original's trailing note was never
    # part of the command anyway. Add --pod-network-cidr 10.244.0.0/16 if
    # using flannel networking.
    - name: Run kubeadm if admin.conf doesn't exist
      command: kubeadm init --ignore-preflight-errors all --cri-socket /run/crio/crio.sock
      async: 180
      poll: 0
      register: kubeadm_running
      when: not admin_conf.stat.exists

    - name: Wait for the kubelet config file to be created
      wait_for:
        path: /var/lib/kubelet/config.yaml
      when: not admin_conf.stat.exists

    - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
      replace:
        path: /var/lib/kubelet/config.yaml
        regexp: "cgroupDriver: cgroupfs"
        replace: "cgroupDriver: systemd"
      when: not admin_conf.stat.exists

    - name: Restart kubelet service
      service:
        name: kubelet
        state: restarted

    - name: Check that kubeadm has completed
      async_status:
        jid: "{{ kubeadm_running.ansible_job_id }}"
      when: not admin_conf.stat.exists
      register: job_result
      until: job_result.finished
      retries: 30

    - name: Create kubeadm join command
      shell: kubeadm token create --print-join-command
      register: results
      when: not admin_conf.stat.exists

    - debug:
        var: results.stdout
      when: not admin_conf.stat.exists

    # Token is the 5th whitespace-separated field of the join command.
    - set_fact:
        token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
      vars:
        regexp: '([^\s]+\s){4}([^\s]+)'
      when: not admin_conf.stat.exists

    - debug:
        var: token
      when: not admin_conf.stat.exists

    - set_fact:
        hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
      vars:
        regexp: '--discovery-token-ca-cert-hash ([^\s]+)'
      when: not admin_conf.stat.exists

    - debug:
        var: hash
      when: not admin_conf.stat.exists

    - name: Install weave networking for x86_64
      shell: curl -sSL "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
      when:
        - not admin_conf.stat.exists

- name: Set up worker nodes
  hosts: gcp_k8s_role_worker
  become: true
  tasks:
    # default(false) guards the case where the master play skipped
    # set_fact (cluster already initialised): the original `== true`
    # comparison errored on the undefined variable instead of skipping.
    - name: Install kubernetes on nodes
      command: kubeadm join "{{ hostvars[item]['ansible_eth0']['ipv4']['address'] }}":6443 --token "{{ hostvars[item]['token'] }}" --discovery-token-ca-cert-hash "{{ hostvars[item]['hash'] }}"
      when: hostvars[item]['running'] | default(false)
      with_items: "{{ groups['gcp_first_master_true'] }}"

    - name: Wait for the kubelet config file to be created
      wait_for:
        path: /var/lib/kubelet/config.yaml

    - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
      replace:
        path: /var/lib/kubelet/config.yaml
        regexp: "cgroupDriver: cgroupfs"
        replace: "cgroupDriver: systemd"
      register: kube_updated

    - name: Restart kubelet service
      service:
        name: kubelet
        state: restarted
      when: kube_updated is changed

46
gcp-lb/main.tf Normal file
View File

@ -0,0 +1,46 @@
# ---------------------------------------------------------------------------------------------------------------------
# LAUNCH A NETWORK LOAD BALANCER
# ---------------------------------------------------------------------------------------------------------------------
terraform {
# The modules used in this example have been updated with 0.12 syntax, which means the example is no longer
# compatible with any versions below 0.12.
required_version = ">= 0.12"
}
# ------------------------------------------------------------------------------
# CONFIGURE OUR GCP CONNECTION
# ------------------------------------------------------------------------------
#provider "google-beta" {
# version = "~> 2.7.0"
# region = var.region
# project = var.project
#}
# ------------------------------------------------------------------------------
# CREATE THE INTERNAL TCP LOAD BALANCER
# ------------------------------------------------------------------------------
module "lb" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "github.com/gruntwork-io/terraform-google-load-balancer.git//modules/network-load-balancer?ref=v0.2.0"
source = "github.com/gruntwork-io/terraform-google-load-balancer.git//modules/network-load-balancer?ref=v0.2.1"
#source = "../../modules/network-load-balancer"
name = var.name
region = var.region
project = var.project
enable_health_check = true
health_check_port = "6443"
health_check_path = "/api"
firewall_target_tags = [var.name]
# instances = [google_compute_instance.k8s-master.self_link]
instances = "${var.gce-vms}"
custom_labels = var.custom_labels
}

8
gcp-lb/outputs.tf Normal file
View File

@ -0,0 +1,8 @@
# ------------------------------------------------------------------------------
# LOAD BALANCER OUTPUTS
# ------------------------------------------------------------------------------
output "load_balancer_ip_address" {
description = "Internal IP address of the load balancer"
value = module.lb.load_balancer_ip_address
}

12
gcp-lb/provider.tf Normal file
View File

@ -0,0 +1,12 @@
provider "google" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project}"
region = "${var.region}"
}
provider "google-beta" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project}"
region = "${var.region}"
}

44
gcp-lb/variables.tf Normal file
View File

@ -0,0 +1,44 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These variables are expected to be passed in by the operator
# ---------------------------------------------------------------------------------------------------------------------
variable "project" {
description = "The project ID to create the resources in."
type = string
default = "centos-k8s"
}
variable "region" {
description = "The region to create the resources in."
type = string
default = "us-central1" # Iowa
}
variable "zone" {
description = "The GCP zone to create the sample compute instances in. Must within the region specified in 'var.region'"
type = string
default = "us-central1-a"
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These variables have defaults, but may be overridden by the operator.
# ---------------------------------------------------------------------------------------------------------------------
variable "name" {
description = "Name for the load balancer forwarding rule and prefix for supporting resources."
type = string
default = "k8s-lb"
}
variable "custom_labels" {
description = "A map of custom labels to apply to the resources. The key is the label name and the value is the label value."
type = map(string)
default = {}
}
variable "gce-vms" {
description = "Self link to VM for LB"
type = list
}

169
k8s-master/firewall.tf Normal file
View File

@ -0,0 +1,169 @@
resource "google_compute_firewall" "ssh" {
name = "${var.network}-firewall-ssh"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["22"]
}
target_tags = ["${var.network}-firewall-ssh"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "http" {
name = "${var.network}-firewall-http"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["80"]
}
target_tags = ["${var.network}-firewall-http"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "https" {
name = "${var.network}-firewall-https"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["443"]
}
target_tags = ["${var.network}-firewall-https"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "icmp" {
name = "${var.network}-firewall-icmp"
network = google_compute_network.k8s-network.name
allow {
protocol = "icmp"
}
target_tags = ["${var.network}-firewall-icmp"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "postgresql" {
name = "${var.network}-firewall-postgresql"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["5432"]
}
target_tags = ["${var.network}-firewall-postgresql"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-openshift-console" {
name = "${var.network}-firewall-openshift-console"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["8443"]
}
target_tags = ["${var.network}-firewall-openshift-console"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-secure-forward" {
name = "${var.network}-firewall-secure-forward"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["24284"]
}
target_tags = ["${var.network}-firewall-secure-forward"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-apiserver" {
name = "${var.network}-firewall-k8s-apiserver"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["6443"]
}
target_tags = ["${var.network}-firewall-k8s-apiserver"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-etcd-api" {
name = "${var.network}-firewall-k8s-etcd-api"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["2379",
"2380",
]
}
target_tags = ["${var.network}-firewall-k8s-etcd-api"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kubelet-api" {
name = "${var.network}-firewall-k8s-kubelet-api"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10250"]
}
target_tags = ["${var.network}-firewall-k8s-kubelet-api"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kube-scheduler" {
name = "${var.network}-firewall-k8s-kube-scheduler"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10251"]
}
target_tags = ["${var.network}-firewall-k8s-kube-scheduler"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kube-controller" {
name = "${var.network}-firewall-k8s-kube-controller"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10252"]
}
target_tags = ["${var.network}-firewall-k8s-kube-controller"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-nodeports" {
name = "${var.network}-firewall-k8s-nodeports"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["30000-32676"]
}
target_tags = ["${var.network}-firewall-k8s-nodeports"]
source_ranges = ["0.0.0.0/0"]
}

104
k8s-master/main.tf Normal file
View File

@ -0,0 +1,104 @@
resource "google_compute_instance" "k8s-master" {
count = 1
name = "k8s-master-${count.index + 1}"
## for a setup having multiple instances of the same type, you can do
## the following, there would be 2 instances of the same configuration
## provisioned
# count = 2
# name = "${var.instance-name}-${count.index}"
machine_type = var.vm_type["3point75gig"]
zone = var.region
tags = [
"${var.network}-firewall-ssh",
"${var.network}-firewall-icmp",
"${var.network}-firewall-k8s-apiserver",
"${var.network}-firewall-k8s-etcd-api",
"${var.network}-firewall-k8s-kubelet-api",
"${var.network}-firewall-k8s-kube-scheduler",
"${var.network}-firewall-k8s-kube-controller",
]
labels = {
k8s-role = "master"
first-master = "true"
}
boot_disk {
auto_delete = "true"
initialize_params {
image = var.os["centos7"]
type = "pd-standard"
}
}
metadata = {
hostname = "k8s-master.ervine.org"
startup-script = <<SCRIPT
${file("${path.module}/scripts/get-metadata-gce.sh")}
${file("${path.module}/scripts/startup.sh")}
SCRIPT
}
network_interface {
subnetwork = google_compute_subnetwork.k8s-network_subnetwork.name
access_config {
// Ephemeral IP
}
}
}
resource "google_compute_instance" "k8s-worker" {
count = 2
name = "k8s-worker-${count.index + 1}"
## for a setup having multiple instances of the same type, you can do
## the following, there would be 2 instances of the same configuration
## provisioned
# count = 2
# name = "${var.instance-name}-${count.index}"
machine_type = var.vm_type["1point7gig"]
zone = var.region
tags = [
"${var.network}-firewall-ssh",
"${var.network}-firewall-icmp",
"${var.network}-firewall-http",
"${var.network}-firewall-https",
"${var.network}-firewall-k8s-kubelet-api",
"${var.network}-firewall-k8s-nodeports",
]
labels = {
k8s-role = "worker"
}
boot_disk {
auto_delete = "true"
initialize_params {
image = var.os["centos7"]
type = "pd-standard"
}
}
metadata = {
hostname = "k8s-worker-${count.index + 1}"
startup-script = <<SCRIPT
${file("${path.module}/scripts/get-metadata-gce.sh")}
${file("${path.module}/scripts/startup.sh")}
SCRIPT
}
network_interface {
subnetwork = google_compute_subnetwork.k8s-network_subnetwork.name
access_config {
// Ephemeral IP
}
}
}

11
k8s-master/network.tf Normal file
View File

@ -0,0 +1,11 @@
resource "google_compute_network" "k8s-network" {
name = var.network
}
resource "google_compute_subnetwork" "k8s-network_subnetwork" {
name = "${var.network}-subnetwork-${var.subnetwork-region}"
region = var.subnetwork-region
network = google_compute_network.k8s-network.self_link
ip_cidr_range = "10.0.0.0/16"
}

3
k8s-master/outputs.tf Normal file
View File

@ -0,0 +1,3 @@
output "vm_link" {
value = "${google_compute_instance.k8s-master.*.self_link}"
}

12
k8s-master/provider.tf Normal file
View File

@ -0,0 +1,12 @@
provider "google" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project-name}"
region = "${var.region}"
}
provider "google-beta" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project-name}"
region = "${var.region}"
}

View File

@ -0,0 +1,4 @@
# Get data
# NOTE(review): $INSTANCE_MASK is not defined anywhere in this script —
# presumably exported by the environment or a preceding startup fragment;
# confirm, otherwise the -r filter matches nothing.
# Collect the internal IPs of all matching instances as a space-separated list.
JOIN_ADDRS=$(gcloud compute instances list -r $INSTANCE_MASK --format=json | jq --raw-output .[].networkInterfaces[0].networkIP | tr '\n' ' ')
# Instance zone from the GCE metadata server (4th path segment of projects/<id>/zones/<zone>).
ZONE=$(curl -sf http://metadata/computeMetadata/v1/instance/zone -H "Metadata-Flavor: Google" | awk -F/ '{print $4}' 2>/dev/null)
DC_NAME="gce-${ZONE}"

View File

@ -0,0 +1,9 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAaAAAABNlY2RzYS
1zaGEyLW5pc3RwMjU2AAAACG5pc3RwMjU2AAAAQQT3FG4Me+t762wVftgWzUMQcHc8NM+r
+/a86F8ckPFPwxNI/YT3XYQh12cwHuNrtkPg/XjlI3ZCcl1wr9vsq8PsAAAAwNH5WvzR+V
r8AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+
2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+
wAAAAgEVlO3vKCe1us2WG2HbbQg8JUgembtrgidh/j598VW8QAAAAjam9ubnlAamVydmlu
ZS1mZWRvcmEuaGtnLnJlZGhhdC5jb20BAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@ -0,0 +1 @@
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+w= jonny@jervine-fedora.hkg.redhat.com

View File

@ -0,0 +1,15 @@
#!/bin/sh
# Startup script: create the "ansible" provisioning user and install its
# authorized SSH key. Original shebang was "#/bin/sh" (missing '!'), which
# is just a comment — the interpreter line never took effect.
# Create ansible user
useradd -G google-sudoers -c "Ansible User" ansible
# -p keeps the script idempotent if the instance re-runs its startup script.
mkdir -p /home/ansible/.ssh
chown ansible:ansible /home/ansible/.ssh
chmod 700 /home/ansible/.ssh
cat <<EOF > /home/ansible/.ssh/authorized_keys
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+w=
EOF
chown ansible:ansible /home/ansible/.ssh/authorized_keys
chmod 600 /home/ansible/.ssh/authorized_keys

33
k8s-master/variables.tf Normal file
View File

@ -0,0 +1,33 @@
variable "region" {
default = "us-central1-a" # Iowa
}
variable "project-name" {
default = "centos-k8s"
}
variable "subnetwork-region" {
default = "us-central1"
}
variable "network" {
default = "k8s-network"
}
variable "vm_type" {
default = {
"614meg" = "f1-micro"
"1point7gig" = "g1-small"
"3point75gig" = "n1-standard-1"
}
}
variable "os" {
default = {
"centos7" = "centos-7-v20190619"
"debian9" = "debian-9-stretch-v20190618"
"ubuntu-1604-lts" = "ubuntu-1604-xenial-v20190628"
"ubuntu-1804-lts" = "ubuntu-1804-bionic-v20190628"
}
}

169
k8s-workers/firewall.tf Normal file
View File

@ -0,0 +1,169 @@
resource "google_compute_firewall" "ssh" {
name = "${var.network}-firewall-ssh"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["22"]
}
target_tags = ["${var.network}-firewall-ssh"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "http" {
name = "${var.network}-firewall-http"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["80"]
}
target_tags = ["${var.network}-firewall-http"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "https" {
name = "${var.network}-firewall-https"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["443"]
}
target_tags = ["${var.network}-firewall-https"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "icmp" {
name = "${var.network}-firewall-icmp"
network = google_compute_network.k8s-network.name
allow {
protocol = "icmp"
}
target_tags = ["${var.network}-firewall-icmp"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "postgresql" {
name = "${var.network}-firewall-postgresql"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["5432"]
}
target_tags = ["${var.network}-firewall-postgresql"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-openshift-console" {
name = "${var.network}-firewall-openshift-console"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["8443"]
}
target_tags = ["${var.network}-firewall-openshift-console"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-secure-forward" {
name = "${var.network}-firewall-secure-forward"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["24284"]
}
target_tags = ["${var.network}-firewall-secure-forward"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-apiserver" {
name = "${var.network}-firewall-k8s-apiserver"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["6443"]
}
target_tags = ["${var.network}-firewall-k8s-apiserver"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-etcd-api" {
name = "${var.network}-firewall-k8s-etcd-api"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["2379",
"2380",
]
}
target_tags = ["${var.network}-firewall-k8s-etcd-api"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kubelet-api" {
name = "${var.network}-firewall-k8s-kubelet-api"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10250"]
}
target_tags = ["${var.network}-firewall-k8s-kubelet-api"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kube-scheduler" {
name = "${var.network}-firewall-k8s-kube-scheduler"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10251"]
}
target_tags = ["${var.network}-firewall-k8s-kube-scheduler"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-kube-controller" {
name = "${var.network}-firewall-k8s-kube-controller"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["10252"]
}
target_tags = ["${var.network}-firewall-k8s-kube-controller"]
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "firewall-k8s-nodeports" {
name = "${var.network}-firewall-k8s-nodeports"
network = google_compute_network.k8s-network.name
allow {
protocol = "tcp"
ports = ["30000-32676"]
}
target_tags = ["${var.network}-firewall-k8s-nodeports"]
source_ranges = ["0.0.0.0/0"]
}

55
k8s-workers/main.tf Normal file
View File

@ -0,0 +1,55 @@
resource "google_compute_instance" "k8s-worker" {
count = 2
name = "k8s-worker-${count.index + 1}"
## for a setup having multiple instances of the same type, you can do
## the following, there would be 2 instances of the same configuration
## provisioned
# count = 2
# name = "${var.instance-name}-${count.index}"
machine_type = var.vm_type["1point7gig"]
zone = var.region
tags = [
"${var.network}-firewall-ssh",
"${var.network}-firewall-icmp",
"${var.network}-firewall-http",
"${var.network}-firewall-https",
"${var.network}-firewall-k8s-kubelet-api",
"${var.network}-firewall-k8s-nodeports",
]
labels = {
k8s-role = "worker"
}
boot_disk {
auto_delete = "true"
initialize_params {
image = var.os["centos7"]
type = "pd-standard"
}
}
metadata = {
hostname = "k8s-worker-${count.index + 1}"
startup-script = <<SCRIPT
${file("${path.module}/scripts/get-metadata-gce.sh")}
${file("${path.module}/scripts/startup.sh")}
SCRIPT
}
network_interface {
subnetwork = google_compute_subnetwork.k8s-network_subnetwork.name
access_config {
// Ephemeral IP
}
}
scheduling {
preemptible = true
automatic_restart = false
}
}

3
k8s-workers/outputs.tf Normal file
View File

@ -0,0 +1,3 @@
output "vm_link" {
value = "${google_compute_instance.k8s-worker.*.self_link}"
}

12
k8s-workers/provider.tf Normal file
View File

@ -0,0 +1,12 @@
provider "google" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project-name}"
region = "${var.region}"
}
provider "google-beta" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project-name}"
region = "${var.region}"
}

View File

@ -0,0 +1,4 @@
# Get data
# NOTE(review): $INSTANCE_MASK is not defined anywhere in this script —
# presumably exported by the environment or a preceding startup fragment;
# confirm, otherwise the -r filter matches nothing.
# Collect the internal IPs of all matching instances as a space-separated list.
JOIN_ADDRS=$(gcloud compute instances list -r $INSTANCE_MASK --format=json | jq --raw-output .[].networkInterfaces[0].networkIP | tr '\n' ' ')
# Instance zone from the GCE metadata server (4th path segment of projects/<id>/zones/<zone>).
ZONE=$(curl -sf http://metadata/computeMetadata/v1/instance/zone -H "Metadata-Flavor: Google" | awk -F/ '{print $4}' 2>/dev/null)
DC_NAME="gce-${ZONE}"

View File

@ -0,0 +1,9 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAaAAAABNlY2RzYS
1zaGEyLW5pc3RwMjU2AAAACG5pc3RwMjU2AAAAQQT3FG4Me+t762wVftgWzUMQcHc8NM+r
+/a86F8ckPFPwxNI/YT3XYQh12cwHuNrtkPg/XjlI3ZCcl1wr9vsq8PsAAAAwNH5WvzR+V
r8AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+
2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+
wAAAAgEVlO3vKCe1us2WG2HbbQg8JUgembtrgidh/j598VW8QAAAAjam9ubnlAamVydmlu
ZS1mZWRvcmEuaGtnLnJlZGhhdC5jb20BAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@ -0,0 +1 @@
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+w= jonny@jervine-fedora.hkg.redhat.com

View File

@ -0,0 +1,14 @@
#!/bin/sh
# Startup script: create the "ansible" provisioning user and install its
# authorized SSH key. Original shebang was "#/bin/sh" (missing '!'), which
# is just a comment — the interpreter line never took effect.
# Create ansible user
useradd -G google-sudoers -c "Ansible User" ansible
# -p keeps the script idempotent if the instance re-runs its startup script.
mkdir -p /home/ansible/.ssh
chown ansible:ansible /home/ansible/.ssh
chmod 700 /home/ansible/.ssh
cat <<EOF > /home/ansible/.ssh/authorized_keys
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcUbgx763vrbBV+2BbNQxBwdzw0z6v79rzoXxyQ8U/DE0j9hPddhCHXZzAe42u2Q+D9eOUjdkJyXXCv2+yrw+w=
EOF
chown ansible:ansible /home/ansible/.ssh/authorized_keys
chmod 600 /home/ansible/.ssh/authorized_keys

33
k8s-workers/variables.tf Normal file
View File

@ -0,0 +1,33 @@
variable "region" {
default = "us-central1-a" # Iowa
}
variable "project-name" {
default = "centos-k8s"
}
variable "subnetwork-region" {
default = "us-central1"
}
variable "network" {
default = "k8s-network"
}
variable "vm_type" {
default = {
"614meg" = "f1-micro"
"1point7gig" = "g1-small"
"3point75gig" = "n1-standard-1"
}
}
variable "os" {
default = {
"centos7" = "centos-7-v20190619"
"debian9" = "debian-9-stretch-v20190618"
"ubuntu-1604-lts" = "ubuntu-1604-xenial-v20190628"
"ubuntu-1804-lts" = "ubuntu-1804-bionic-v20190628"
}
}

12
main.tf Normal file
View File

@ -0,0 +1,12 @@
module "k8s-master" {
source = "./k8s-master"
}
#module "k8s-workers" {
# source = "./k8s-workers"
#}
module "gcp-lb" {
source = "./gcp-lb"
gce-vms = "${module.k8s-master.vm_link}"
}

12
provider.tf Normal file
View File

@ -0,0 +1,12 @@
provider "google" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project}"
region = "${var.region}"
}
provider "google-beta" {
credentials = file("centos-k8s-d9557c7f6db3.json")
project = "${var.project}"
region = "${var.region}"
}

33
variables.tf Normal file
View File

@ -0,0 +1,33 @@
variable "region" {
default = "us-central1-a" # Iowa
}
variable "project" {
default = "centos-k8s"
}
variable "subnetwork-region" {
default = "us-central1"
}
variable "network" {
default = "k8s-network"
}
variable "vm_type" {
default = {
"614meg" = "f1-micro"
"1point7gig" = "g1-small"
"3point75gig" = "n1-standard-1"
}
}
variable "os" {
default = {
"centos7" = "centos-7-v20190619"
"debian9" = "debian-9-stretch-v20190618"
"ubuntu-1604-lts" = "ubuntu-1604-xenial-v20190628"
"ubuntu-1804-lts" = "ubuntu-1804-bionic-v20190628"
}
}

4
versions.tf Normal file
View File

@ -0,0 +1,4 @@
terraform {
required_version = ">= 0.12"
}