diff --git a/ansible/inv-gcp.yml b/ansible/inv-gcp.yml
new file mode 100644
index 0000000..46c26da
--- /dev/null
+++ b/ansible/inv-gcp.yml
@@ -0,0 +1,25 @@
+plugin: gcp_compute
+zones: # populate inventory with instances in these zones
+ - us-central1-a
+projects:
+ - centos-k8s
+filters:
+# - scheduling.automaticRestart = true AND machineType = n1-standard-1
+service_account_file: /home/jonny/terraform/gcp-k8s/centos-k8s-d9557c7f6db3.json
+auth_kind: serviceaccount
+scopes:
+ - 'https://www.googleapis.com/auth/cloud-platform'
+ - 'https://www.googleapis.com/auth/compute.readonly'
+keyed_groups:
+ # Create groups from GCE labels
+ - prefix: gcp
+ key: labels
+hostnames:
+ # List hosts by name instead of the default public ip
+ - name
+compose:
+ # Set an inventory parameter to use the Public IP address to connect to the host
+ # For Private ip use "networkInterfaces[0].networkIP"
+ ansible_host: networkInterfaces[0].accessConfigs[0].natIP
+
+
diff --git a/ansible/inventory/gce.ini b/ansible/inventory/gce.ini
new file mode 100644
index 0000000..af27a9c
--- /dev/null
+++ b/ansible/inventory/gce.ini
@@ -0,0 +1,76 @@
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# The GCE inventory script has the following dependencies:
+# 1. A valid Google Cloud Platform account with Google Compute Engine
+# enabled. See https://cloud.google.com
+# 2. An OAuth2 Service Account flow should be enabled. This will generate
+# a private key file that the inventory script will use for API request
+# authorization. See https://developers.google.com/accounts/docs/OAuth2
+# 3. Convert the private key from PKCS12 to PEM format
+# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
+# > -nodes -nocerts | openssl rsa -out pkey.pem
+# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
+#
+# (See ansible/test/gce_tests.py comments for full install instructions)
+#
+# Author: Eric Johnson
+# Contributors: John Roach
+
+[gce]
+# GCE Service Account configuration information can be stored in the
+# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
+# exist in your PYTHONPATH and be picked up automatically with an import
+# statement in the inventory script. However, you can specify an absolute
+# path to the secrets.py file with 'libcloud_secrets' parameter.
+# This option will be deprecated in a future release.
+libcloud_secrets =
+
+# If you are not going to use a 'secrets.py' file, you can set the necessary
+# authorization parameters here.
+# You can add multiple gce projects by using a comma separated list. Make
+# sure that the service account used has permissions on said projects.
+gce_service_account_email_address =
+gce_service_account_pem_file_path =
+gce_project_id =
+gce_zone =
+
+# Filter inventory based on state. Leave undefined to return instances regardless of state.
+# example: Uncomment to only return inventory in the running or provisioning state
+#instance_states = RUNNING,PROVISIONING
+
+# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
+# example: Uncomment to only return inventory with the http-server or https-server tag
+#instance_tags = http-server,https-server
+
+
+[inventory]
+# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
+# contain the instance internal or external address. Values may be either
+# 'internal' or 'external'. If 'external' is specified but no external instance
+# address exists, the internal address will be used.
+# The INVENTORY_IP_TYPE environment variable will override this value.
+inventory_ip_type =
+
+[cache]
+# directory in which cache should be created
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
diff --git a/ansible/inventory/gce.py b/ansible/inventory/gce.py
new file mode 100755
index 0000000..0a7df3f
--- /dev/null
+++ b/ansible/inventory/gce.py
@@ -0,0 +1,521 @@
+#!/usr/bin/env python
+
+# Copyright: (c) 2013, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+ - gce_subnetwork
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+ used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+ your instance was created with a root persistent disk it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson
+Contributors: Matt Hite , Tom Melendez ,
+ John Roach
+Version: 0.0.4
+'''
+
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION = "v2"
+
+import sys
+import os
+import argparse
+
+from time import time
+
+from ansible.module_utils.six.moves import configparser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+import json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ _ = Provider.GCE
+except Exception:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+ ''' Determines if the cache files have expired, or if it is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.drivers = self.get_gce_drivers()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {'use_cache': True}
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a ConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = configparser.ConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'gce_zone': '',
+ 'libcloud_secrets': '',
+ 'instance_tags': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Set the instance_tags filter, env var overrides config from file
+ # and cli param overrides all
+ if self.args.instance_tags:
+ self.instance_tags = self.args.instance_tags
+ else:
+ self.instance_tags = os.environ.get(
+ 'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
+ if self.instance_tags:
+ self.instance_tags = self.instance_tags.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+ # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_drivers(self):
+ """Determine the GCE authorization settings and return a list of
+ libcloud drivers.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+
+ try:
+ import secrets
+ args = list(secrets.GCE_PARAMS)
+ kwargs = secrets.GCE_KEYWORD_PARAMS
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found:
+ args = [
+ self.config.get('gce', 'gce_service_account_email_address'),
+ self.config.get('gce', 'gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id'),
+ 'datacenter': self.config.get('gce', 'gce_zone')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
+
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+ kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
+
+ gce_drivers = []
+ projects = kwargs['project'].split(',')
+ for project in projects:
+ kwargs['project'] = project
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ gce_drivers.append(gce)
+ return gce_drivers
+
+ def parse_env_zones(self):
+ '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this will be used to filter the results of the grouped_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--instance-tags', action='store',
+ help='Only include instances with this tags, separated by comma')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if 'items' in inst.extra['metadata']:
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ subnet = None
+ if 'subnetwork' in inst.extra['networkInterfaces'][0]:
+ subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+ # default to external IP unless user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ 'gce_subnetwork': subnet,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': ssh_host
+ }
+
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
+ try:
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
+ except Exception as e:
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
+ while more_results:
+ for driver in self.drivers:
+ driver.connection.gce_params = params
+ all_nodes.extend(driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
+ # This check filters on the desired instance tags defined in the
+ # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
+ # or as the cli param --instance-tags.
+ #
+ # If the instance_tags list is _empty_ then _ALL_ instances are returned.
+ #
+ # If the instance_tags list is _populated_ then check the current
+ # instance tags against the instance_tags list. If the instance has
+ # at least one tag from the instance_tags list, it is returned.
+ if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
+ continue
+
+ name = node.name
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups:
+ groups[zone].append(name)
+ else:
+ groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if tag in groups:
+ groups[tag].append(name)
+ else:
+ groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if net in groups:
+ groups[net].append(name)
+ else:
+ groups[net] = [name]
+
+ machine_type = node.size
+ if machine_type in groups:
+ groups[machine_type].append(name)
+ else:
+ groups[machine_type] = [name]
+
+ image = node.image or 'persistent_disk'
+ if image in groups:
+ groups[image].append(name)
+ else:
+ groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if stat in groups:
+ groups[stat].append(name)
+ else:
+ groups[stat] = [name]
+
+ for private_ip in node.private_ips:
+ groups[private_ip] = [name]
+
+ if len(node.public_ips) >= 1:
+ for public_ip in node.public_ips:
+ groups[public_ip] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+if __name__ == '__main__':
+ GceInventory()
diff --git a/ansible/master-node-create.yaml b/ansible/master-node-create.yaml
new file mode 100644
index 0000000..d34ba3c
--- /dev/null
+++ b/ansible/master-node-create.yaml
@@ -0,0 +1,55 @@
+- name: Set up master node
+ hosts: masters
+ become: true
+ tasks:
+ - name: Check for admin.conf from kubeadm
+ stat: path=/etc/kubernetes/admin.conf
+ register: admin_conf
+
+ - set_fact:
+ running: admin_conf.stat.exists
+
+ - name: Run kubeadm if admin.conf doesn't exist
+ command: kubeadm init
+ when: admin_conf.stat.exists == false
+
+ - name: Create kubeadm join command
+ shell: kubeadm token create --print-join-command
+ register: results
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: results.stdout
+ when: admin_conf.stat.exists == false
+ - set_fact:
+ token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
+ vars:
+ regexp: '([^\s]+\s){4}([^\s]+)'
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: token
+ when: admin_conf.stat.exists == false
+ - set_fact:
+ hash: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
+ vars:
+ regexp: '([^\s]+\s){6}([^\s]+)'
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: hash
+ when: admin_conf.stat.exists == false
+
+ - name: Install flannel networking for RPi
+ shell: curl -sSL https://rawgit.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml | sed "s/amd64/arm/g" | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
+ when:
+ - ansible_lsb.id == "Raspbian"
+ - admin_conf.stat.exists == false
+
+ - name: Install flannel networking for x86_64
+ shell: curl -sSL https://rawgit.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
+ when:
+ - ansible_distribution == "Debian"
+ - ansible_architecture == "x86_64"
+ - admin_conf.stat.exists == false
+
+ - debug:
+ msg: "kubeadm has probably already been run."
+ when: admin_conf.stat.exists == true
diff --git a/ansible/secrets.py b/ansible/secrets.py
new file mode 100644
index 0000000..b99cd5e
--- /dev/null
+++ b/ansible/secrets.py
@@ -0,0 +1,2 @@
+GCE_PARAMS = ('1098936731058-compute@developer.gserviceaccount.com', '/home/jonny/terraform/gcp-k8s/centos-k8s-d9557c7f6db3.json')
+GCE_KEYWORD_PARAMS = {'project': 'centos-k8s', 'datacenter': 'us-central1'}
diff --git a/ansible/test-inv b/ansible/test-inv
new file mode 100644
index 0000000..3cb42ab
--- /dev/null
+++ b/ansible/test-inv
@@ -0,0 +1,2 @@
+[master]
+192.168.11.11
diff --git a/ansible/test.yaml b/ansible/test.yaml
new file mode 100644
index 0000000..32bdb4d
--- /dev/null
+++ b/ansible/test.yaml
@@ -0,0 +1,34 @@
+---
+- name: Create the kubernetes cluster using kubeadm
+ hosts: masters
+ become: true
+ tasks:
+ - name: Check for admin.conf from kubeadm
+ stat: path=/etc/kubernetes/admin.conf
+ register: admin_conf
+ - set_fact:
+ running: admin_conf.stat.exists
+
+ - name: Create kubeadm join command
+ shell: kubeadm token create --print-join-command
+ register: results
+ when: admin_conf.stat.exists == true
+ - debug:
+ var: results.stdout
+ when: admin_conf.stat.exists == true
+ - set_fact:
+ token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
+ vars:
+ regexp: '([^\s]+\s){4}([^\s]+)'
+ when: admin_conf.stat.exists == true
+ - debug:
+ var: token
+ when: admin_conf.stat.exists == true
+ - set_fact:
+ hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
+ vars:
+ regexp: '--discovery-token-ca-cert-hash ([^\s]+\s)'
+ when: admin_conf.stat.exists == true
+ - debug:
+ var: hash
+ when: admin_conf.stat.exists == true
diff --git a/ansible/work-kube-config.yaml b/ansible/work-kube-config.yaml
new file mode 100644
index 0000000..1a3cd30
--- /dev/null
+++ b/ansible/work-kube-config.yaml
@@ -0,0 +1,21 @@
+---
+- name: Set up worker nodes
+ hosts: gcp_k8s_role_worker
+ become: true
+ tasks:
+ - name: Wait for the kubelet config file to be created
+ wait_for:
+ path: /var/lib/kubelet/config.yaml
+ - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
+ replace:
+ path: /var/lib/kubelet/config.yaml
+ regexp: "cgroupDriver: cgroupfs"
+ replace: "cgroupDriver: systemd"
+ register: kube_updated
+ - debug:
+ var: kube_updated
+ - name: Restart kubelet service
+ service:
+ name: kubelet
+ state: restarted
+ when: kube_updated.changed == true
diff --git a/ansible/worker-config.yaml b/ansible/worker-config.yaml
new file mode 100644
index 0000000..a21615d
--- /dev/null
+++ b/ansible/worker-config.yaml
@@ -0,0 +1,36 @@
+---
+- name: Create the kubernetes cluster using kubeadm
+ hosts: gcp_first_master_true
+ become: true
+ tasks:
+ - name: Create kubeadm join command
+ shell: kubeadm token create --print-join-command
+ register: results
+ - debug:
+ var: results.stdout
+ - set_fact:
+ running: true
+ - set_fact:
+ token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
+ vars:
+ regexp: '([^\s]+\s){4}([^\s]+)'
+ - debug:
+ var: token
+ - set_fact:
+ hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
+ vars:
+ regexp: '--discovery-token-ca-cert-hash ([^\s]+)'
+ - debug:
+ var: hash
+
+- name: Set up worker nodes
+ hosts: gcp_k8s_role_worker
+ become: true
+ tasks:
+ - name: Install kubernetes on nodes
+ command: kubeadm join "{{ hostvars[item]['ansible_eth0']['ipv4']['address'] }}":6443 --token "{{ hostvars[item]['token'] }}" --discovery-token-ca-cert-hash "{{ hostvars[item]['hash'] }}"
+ when: hostvars[item]['running'] == true
+ with_items: "{{ groups['gcp_first_master_true'] }}"
+ register: join_output
+ - debug:
+ var: join_output.stdout
diff --git a/ansible/yum-config-manager.yaml b/ansible/yum-config-manager.yaml
new file mode 100644
index 0000000..00c462c
--- /dev/null
+++ b/ansible/yum-config-manager.yaml
@@ -0,0 +1,278 @@
+---
+- name: Install yum-utils
+ hosts: all
+ become: true
+ tasks:
+ - name: Install yum-utils
+ yum:
+ name: yum-utils
+ state: present
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Add the kubernetes repo
+ hosts: all
+ become: true
+ tasks:
+ - name: Add kubernetes repo
+ yum_repository:
+ name: kubernetes
+ description: Kubernetes
+ baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ gpgcheck: 1
+ repo_gpgcheck: 1
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Set SELinux to permissive
+ hosts: all
+ become: true
+ tasks:
+ - name: Set SELinux to permissive
+ selinux:
+ policy: targeted
+ state: permissive
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Install kubelet, kubeadm, kubectl
+ hosts: all
+ become: true
+ tasks:
+ - name: Install kubernetes binaries
+ yum:
+ name:
+ - kubelet
+ - kubeadm
+ - kubectl
+ state: present
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Start and enable the kubelet service
+ hosts: all
+ become: true
+ tasks:
+ - name: Start and enable the kubelet service
+ service:
+ name: kubelet
+ enabled: yes
+ state: started
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Add the CRI-O repo
+ hosts: all
+ become: true
+ tasks:
+ - name: Add kubernetes repo
+ yum_repository:
+ name: crio
+ description: CRI-O Repository
+ baseurl: https://cbs.centos.org/repos/paas7-crio-114-candidate/x86_64/os/
+ gpgcheck: 1
+ gpgkey: https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-PaaS
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Install and enable cri-o
+ hosts: all
+ become: true
+ tasks:
+ - name: Install CRI-O binary
+ yum:
+ name: cri-o
+ state: present
+ disable_gpg_check: true
+ when:
+ ansible_distribution == "CentOS"
+ - name: Add the CNI plugin directory to crio.conf
+ lineinfile:
+ path: /etc/crio/crio.conf
+ insertafter: '"/usr/libexec/cni",'
+ line: '"/opt/cni/bin"'
+ state: present
+ - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
+ replace:
+ path: /usr/libexec/crio/crio-wipe/lib.bash
+ regexp: '\"\$1\"'
+ replace: '$1'
+ - name: Fix the crio-wipe lib.bash script (seems to be broken in current CRI-O build)
+ replace:
+ path: /usr/libexec/crio/crio-wipe/lib.bash
+ regexp: '\"\$2\"'
+ replace: '$2'
+ - name: Start and enable the cri-o service
+ service:
+ name: crio
+ enabled: yes
+ state: restarted
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Load necessary kernel modules
+ hosts: all
+ become: true
+ tasks:
+ - name: Load br_netfilter and overlay kernel modules
+ modprobe:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - "br_netfilter"
+ - "overlay"
+ when:
+ ansible_distribution == "CentOS"
+
+- name: Set the sysctl values for networking
+ hosts: all
+ become: true
+ tasks:
+ - name: Set the iptables bridge parameter
+ sysctl:
+ name: net.bridge.bridge-nf-call-iptables
+ value: 1
+ sysctl_set: yes
+ state: present
+ sysctl_file: /etc/sysctl.d/99-k8s.conf
+ - name: Set the ip_forward parameter
+ sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_set: yes
+ state: present
+ sysctl_file: /etc/sysctl.d/99-k8s.conf
+ - name: Set the IPv6 iptables bridge parameter
+ sysctl:
+ name: net.bridge.bridge-nf-call-ip6tables
+ value: 1
+ sysctl_set: yes
+ state: present
+ sysctl_file: /etc/sysctl.d/99-k8s.conf
+
+- name: Create the server side firewall rules
+ hosts: gcp_k8s_role_master
+ become: true
+ tasks:
+ - name: Create the firewalld rule for the API, etcd, kubelet API, scheduler, and controller-manager services for k8s
+ firewalld:
+ port: "{{ item }}"
+ state: enabled
+ permanent: true
+ with_items:
+ - "6443/tcp"
+ - "2379-2380/tcp"
+ - "9537/tcp"
+ - "10250/tcp"
+ - "10251/tcp"
+ - "10252/tcp"
+
+- name: Create the server side firewall rules
+ hosts: gcp_k8s_role_worker
+ become: true
+ tasks:
+ - name: Create the firewalld rule for the API, etcd, kubelet API, scheduler, and controller-manager services for k8s
+ firewalld:
+ port: "{{ item }}"
+ state: enabled
+ permanent: true
+ with_items:
+ - "80/tcp"
+ - "443/tcp"
+ - "10250/tcp"
+ - "30000-32767/tcp"
+
+- name: Create the kubernetes cluster using kubeadm
+ hosts: gcp_first_master_true
+ become: true
+ tasks:
+ - name: Check for admin.conf from kubeadm
+ stat: path=/etc/kubernetes/admin.conf
+ register: admin_conf
+ - set_fact:
+ running: true
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: running
+ when: admin_conf.stat.exists == false
+ - name: Run kubeadm if admin.conf doesn't exist
+ command: kubeadm init --ignore-preflight-errors all --cri-socket /run/crio/crio.sock # Add this if using flannel networking --pod-network-cidr 10.244.0.0/16
+ async: 180
+ poll: 0
+ register: kubeadm_running
+ when: admin_conf.stat.exists == false
+ - name: Wait for the kubelet config file to be created
+ wait_for:
+ path: /var/lib/kubelet/config.yaml
+ when: admin_conf.stat.exists == false
+ - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
+ replace:
+ path: /var/lib/kubelet/config.yaml
+ regexp: "cgroupDriver: cgroupfs"
+ replace: "cgroupDriver: systemd"
+ when: admin_conf.stat.exists == false
+ - name: Restart kubelet service
+ service:
+ name: kubelet
+ state: restarted
+
+ - name: Check that kubeadm has completed
+ async_status:
+ jid: "{{ kubeadm_running.ansible_job_id }}"
+ when: admin_conf.stat.exists == false
+ register: job_result
+ until: job_result.finished
+ retries: 30
+
+ - name: Create kubeadm join command
+ shell: kubeadm token create --print-join-command
+ register: results
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: results.stdout
+ when: admin_conf.stat.exists == false
+ - set_fact:
+ token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
+ vars:
+ regexp: '([^\s]+\s){4}([^\s]+)'
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: token
+ when: admin_conf.stat.exists == false
+ - set_fact:
+ hash: "{{ results.stdout | regex_search(regexp, '\\1') | first }}"
+ vars:
+ regexp: '--discovery-token-ca-cert-hash ([^\s]+)'
+ when: admin_conf.stat.exists == false
+ - debug:
+ var: hash
+ when: admin_conf.stat.exists == false
+ - name: Install weave networking for x86_64
+ shell: curl -sSL "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" | kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f -
+ when:
+ - admin_conf.stat.exists == false
+
+- name: Set up worker nodes
+ hosts: gcp_k8s_role_worker
+ become: true
+ tasks:
+ - name: Install kubernetes on nodes
+ command: kubeadm join "{{ hostvars[item]['ansible_eth0']['ipv4']['address'] }}":6443 --token "{{ hostvars[item]['token'] }}" --discovery-token-ca-cert-hash "{{ hostvars[item]['hash'] }}"
+ when: hostvars[item]['running'] == true
+ with_items: "{{ groups['gcp_first_master_true'] }}"
+
+ - name: Wait for the kubelet config file to be created
+ wait_for:
+ path: /var/lib/kubelet/config.yaml
+ - name: Check for /var/lib/kubelet/config.yaml and replace cgroupfs with systemd as cgroupDriver if found
+ replace:
+ path: /var/lib/kubelet/config.yaml
+ regexp: "cgroupDriver: cgroupfs"
+ replace: "cgroupDriver: systemd"
+ register: kube_updated
+ - name: Restart kubelet service
+ service:
+ name: kubelet
+ state: restarted
+ when: kube_updated.changed == true
diff --git a/gcp-lb/main.tf b/gcp-lb/main.tf
new file mode 100644
index 0000000..a3d7d5f
--- /dev/null
+++ b/gcp-lb/main.tf
@@ -0,0 +1,46 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# LAUNCH A NETWORK LOAD BALANCER
+# ---------------------------------------------------------------------------------------------------------------------
+
+terraform {
+  # The modules used in this example have been updated with 0.12 syntax, which means the example is no longer
+  # compatible with any versions below 0.12.
+  required_version = ">= 0.12"
+}
+
+# ------------------------------------------------------------------------------
+# CONFIGURE OUR GCP CONNECTION
+# ------------------------------------------------------------------------------
+
+#provider "google-beta" {
+#  version = "~> 2.7.0"
+#  region  = var.region
+#  project = var.project
+#}
+
+# ------------------------------------------------------------------------------
+# CREATE THE NETWORK (EXTERNAL) TCP LOAD BALANCER
+# ------------------------------------------------------------------------------
+
+module "lb" {
+  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
+  # to a specific version of the modules, such as the following example:
+  # source = "github.com/gruntwork-io/terraform-google-load-balancer.git//modules/network-load-balancer?ref=v0.2.0"
+  source = "github.com/gruntwork-io/terraform-google-load-balancer.git//modules/network-load-balancer?ref=v0.2.1"
+  #source = "../../modules/network-load-balancer"
+
+  name    = var.name
+  region  = var.region
+  project = var.project
+
+  enable_health_check = true
+  health_check_port   = "6443"
+  health_check_path   = "/api" # NOTE(review): legacy HTTP health check against the HTTPS apiserver port - confirm it actually passes
+
+  firewall_target_tags = [var.name]
+
+  # instances = [google_compute_instance.k8s-master.self_link]
+  instances = var.gce-vms # 0.12 first-class expression; "${var.gce-vms}" interpolation-only form is deprecated
+
+  custom_labels = var.custom_labels
+}
diff --git a/gcp-lb/outputs.tf b/gcp-lb/outputs.tf
new file mode 100644
index 0000000..8401a77
--- /dev/null
+++ b/gcp-lb/outputs.tf
@@ -0,0 +1,8 @@
+# ------------------------------------------------------------------------------
+# LOAD BALANCER OUTPUTS
+# ------------------------------------------------------------------------------
+
+output "load_balancer_ip_address" {
+  # The network-load-balancer module provisions an external (regional) forwarding rule,
+  # so this address is not internal - the previous description was misleading.
+  description = "IP address of the network load balancer"
+  value       = module.lb.load_balancer_ip_address
+}
diff --git a/gcp-lb/provider.tf b/gcp-lb/provider.tf
new file mode 100644
index 0000000..069598b
--- /dev/null
+++ b/gcp-lb/provider.tf
@@ -0,0 +1,12 @@
+provider "google" {
+  credentials = file("centos-k8s-d9557c7f6db3.json") # NOTE(review): committed, hard-coded SA key - prefer GOOGLE_APPLICATION_CREDENTIALS
+  project     = var.project
+  region      = var.region
+}
+
+provider "google-beta" {
+  credentials = file("centos-k8s-d9557c7f6db3.json") # NOTE(review): same hard-coded key as above
+  project     = var.project
+  region      = var.region
+}
+
diff --git a/gcp-lb/variables.tf b/gcp-lb/variables.tf
new file mode 100644
index 0000000..296630d
--- /dev/null
+++ b/gcp-lb/variables.tf
@@ -0,0 +1,44 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# REQUIRED PARAMETERS
+# These variables are expected to be passed in by the operator
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "project" {
+  description = "The project ID to create the resources in."
+  type        = string
+  default     = "centos-k8s"
+}
+
+variable "region" {
+  description = "The region to create the resources in."
+  type        = string
+  default     = "us-central1" # Iowa
+}
+
+variable "zone" {
+  description = "The GCP zone to create the sample compute instances in. Must be within the region specified in 'var.region'"
+  type        = string
+  default     = "us-central1-a"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These variables have defaults, but may be overridden by the operator.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "name" {
+  description = "Name for the load balancer forwarding rule and prefix for supporting resources."
+  type        = string
+  default     = "k8s-lb"
+}
+
+variable "custom_labels" {
+  description = "A map of custom labels to apply to the resources. The key is the label name and the value is the label value."
+  type        = map(string)
+  default     = {}
+}
+
+variable "gce-vms" {
+  description = "Self links of the VM instances to attach to the LB"
+  type        = list(string) # bare 'list' is a deprecated 0.11-ism; constrain the element type
+}
diff --git a/k8s-master/firewall.tf b/k8s-master/firewall.tf
new file mode 100644
index 0000000..63eb947
--- /dev/null
+++ b/k8s-master/firewall.tf
@@ -0,0 +1,169 @@
+resource "google_compute_firewall" "ssh" {
+  name    = "${var.network}-firewall-ssh"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["22"]
+  }
+
+  target_tags   = ["${var.network}-firewall-ssh"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): SSH open to the entire internet - restrict to admin CIDRs or use IAP
+}
+
+resource "google_compute_firewall" "http" {
+  name    = "${var.network}-firewall-http"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["80"]
+  }
+
+  target_tags   = ["${var.network}-firewall-http"]
+  source_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "https" {
+  name    = "${var.network}-firewall-https"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["443"]
+  }
+
+  target_tags   = ["${var.network}-firewall-https"]
+  source_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "icmp" {
+  name    = "${var.network}-firewall-icmp"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "icmp"
+  }
+
+  target_tags   = ["${var.network}-firewall-icmp"]
+  source_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "postgresql" {
+  name    = "${var.network}-firewall-postgresql"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["5432"]
+  }
+
+  target_tags   = ["${var.network}-firewall-postgresql"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): database port world-open - restrict to the app subnet
+}
+
+resource "google_compute_firewall" "firewall-openshift-console" {
+  name    = "${var.network}-firewall-openshift-console"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["8443"]
+  }
+
+  target_tags   = ["${var.network}-firewall-openshift-console"]
+  source_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "firewall-secure-forward" {
+  name    = "${var.network}-firewall-secure-forward"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["24284"] # fluentd secure-forward port
+  }
+
+  target_tags   = ["${var.network}-firewall-secure-forward"]
+  source_ranges = ["0.0.0.0/0"]
+}
+
+resource "google_compute_firewall" "firewall-k8s-apiserver" {
+  name    = "${var.network}-firewall-k8s-apiserver"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["6443"] # kube-apiserver; the LB health check needs to reach this
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-apiserver"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): consider limiting to the LB/health-check ranges plus admin CIDRs
+}
+
+resource "google_compute_firewall" "firewall-k8s-etcd-api" {
+  name    = "${var.network}-firewall-k8s-etcd-api"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports = ["2379", # etcd client traffic
+      "2380",        # etcd peer traffic
+    ]
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-etcd-api"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): etcd holds all cluster state - must NOT be internet-reachable; limit to master/node subnets
+}
+
+resource "google_compute_firewall" "firewall-k8s-kubelet-api" {
+  name    = "${var.network}-firewall-k8s-kubelet-api"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["10250"]
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-kubelet-api"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): kubelet API world-open - limit to the control-plane subnet
+}
+
+resource "google_compute_firewall" "firewall-k8s-kube-scheduler" {
+  name    = "${var.network}-firewall-k8s-kube-scheduler"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["10251"]
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-kube-scheduler"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): scheduler metrics port rarely needs external exposure
+}
+
+resource "google_compute_firewall" "firewall-k8s-kube-controller" {
+  name    = "${var.network}-firewall-k8s-kube-controller"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["10252"]
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-kube-controller"]
+  source_ranges = ["0.0.0.0/0"] # NOTE(review): controller-manager port rarely needs external exposure
+}
+
+resource "google_compute_firewall" "firewall-k8s-nodeports" {
+  name    = "${var.network}-firewall-k8s-nodeports"
+  network = google_compute_network.k8s-network.name
+
+  allow {
+    protocol = "tcp"
+    ports    = ["30000-32767"] # Kubernetes default NodePort range; was mistyped as 32676
+  }
+
+  target_tags   = ["${var.network}-firewall-k8s-nodeports"]
+  source_ranges = ["0.0.0.0/0"]
+}
diff --git a/k8s-master/main.tf b/k8s-master/main.tf
new file mode 100644
index 0000000..2eb20c3
--- /dev/null
+++ b/k8s-master/main.tf
@@ -0,0 +1,104 @@
+resource "google_compute_instance" "k8s-master" {
+ count = 1
+ name = "k8s-master-${count.index + 1}"
+
+ ## for a setup having multiple instances of the same type, you can do
+ ## the following, there would be 2 instances of the same configuration
+ ## provisioned
+ # count = 2
+ # name = "${var.instance-name}-${count.index}"
+ machine_type = var.vm_type["3point75gig"]
+
+ zone = var.region
+
+ tags = [
+ "${var.network}-firewall-ssh",
+ "${var.network}-firewall-icmp",
+ "${var.network}-firewall-k8s-apiserver",
+ "${var.network}-firewall-k8s-etcd-api",
+ "${var.network}-firewall-k8s-kubelet-api",
+ "${var.network}-firewall-k8s-kube-scheduler",
+ "${var.network}-firewall-k8s-kube-controller",
+ ]
+
+ labels = {
+ k8s-role = "master"
+ first-master = "true"
+ }
+
+ boot_disk {
+ auto_delete = "true"
+ initialize_params {
+ image = var.os["centos7"]
+ type = "pd-standard"
+ }
+ }
+
+ metadata = {
+ hostname = "k8s-master.ervine.org"
+ startup-script = <