-rw-r--r--  .dockerignore | 2
-rw-r--r--  images/installer/Dockerfile | 6
-rwxr-xr-x  images/installer/root/usr/local/bin/entrypoint-gcp | 51
-rwxr-xr-x  images/installer/root/usr/local/bin/user_setup | 2
-rw-r--r--  inventory/.gitignore | 1
-rw-r--r--  inventory/dynamic/gcp/README.md | 1
-rw-r--r--  inventory/dynamic/gcp/ansible.cfg | 45
-rw-r--r--  inventory/dynamic/gcp/group_vars/all/00_defaults.yml | 42
-rwxr-xr-x  inventory/dynamic/gcp/hosts.py | 408
-rwxr-xr-x  inventory/dynamic/gcp/hosts.sh | 15
-rw-r--r--  inventory/dynamic/gcp/none | 1
-rw-r--r--  inventory/dynamic/injected/README.md | 3
-rw-r--r--  openshift-ansible.spec | 6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 27
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 4
-rw-r--r--  playbooks/common/private/components.yml | 38
-rw-r--r--  playbooks/common/private/control_plane.yml | 34
-rw-r--r--  playbooks/deploy_cluster.yml | 37
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml | 162
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 106
-rw-r--r--  playbooks/gcp/openshift-cluster/deprovision.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/install.yml | 33
-rw-r--r--  playbooks/gcp/openshift-cluster/install_gcp.yml | 21
-rw-r--r--  playbooks/gcp/openshift-cluster/inventory.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/launch.yml | 12
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml (renamed from playbooks/gcp/provision.yml) | 9
-rw-r--r--  playbooks/gcp/openshift-cluster/publish_image.yml | 9
l---------  playbooks/gcp/openshift-cluster/roles | 1
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml | 10
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml | 21
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml | 5
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml | 68
-rw-r--r--  roles/openshift_bootstrap_autoapprover/tasks/main.yml | 28
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml | 10
-rw-r--r--  roles/openshift_gcp/files/bootstrap-script.sh | 42
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.service | 7
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.timer | 10
-rw-r--r--  roles/openshift_gcp/files/partition.conf (renamed from roles/openshift_gcp_image_prep/files/partition.conf) | 2
-rw-r--r--  roles/openshift_gcp/meta/main.yml | 17
-rw-r--r--  roles/openshift_gcp/tasks/add_custom_repositories.yml | 20
-rw-r--r--  roles/openshift_gcp/tasks/configure_gcp_base_image.yml (renamed from roles/openshift_gcp_image_prep/tasks/main.yaml) | 14
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_bootstrap.yml | 36
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_healthcheck.yml | 19
-rw-r--r--  roles/openshift_gcp/tasks/dynamic_inventory.yml | 5
-rw-r--r--  roles/openshift_gcp/tasks/frequent_log_rotation.yml | 18
-rw-r--r--  roles/openshift_gcp/tasks/main.yml (renamed from roles/openshift_gcp/tasks/main.yaml) | 4
-rw-r--r--  roles/openshift_gcp/tasks/node_cloud_config.yml | 12
-rw-r--r--  roles/openshift_gcp/tasks/publish_image.yml | 32
-rw-r--r--  roles/openshift_gcp/tasks/setup_scale_group_facts.yml | 44
-rw-r--r--  roles/openshift_gcp/templates/inventory.j2.sh | 8
-rw-r--r--  roles/openshift_gcp/templates/master_healthcheck.j2 | 68
-rw-r--r--  roles/openshift_gcp/templates/openshift-bootstrap-update.j2 | 7
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 17
-rw-r--r--  roles/openshift_gcp/templates/yum_repo.j2 | 20
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 3
56 files changed, 1552 insertions(+), 116 deletions(-)
diff --git a/.dockerignore b/.dockerignore
index 0a70c5bfa..2509d48b5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,7 +2,7 @@
bin
docs
hack
-inventory
+inventory/hosts.*
test
utils
**/*.md
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index b1390480a..22a0d06a0 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -8,12 +8,14 @@ USER root
COPY images/installer/origin-extra-root /
# install ansible and deps
-RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
&& EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && rpm -V $INSTALL_PKGS $EPEL_PKGS \
+ && EPEL_TESTING_PKGS="python2-libcloud" \
+ && yum install -y --enablerepo=epel-testing --setopt=tsflags=nodocs $EPEL_TESTING_PKGS \
+ && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
&& yum clean all
LABEL name="openshift/origin-ansible" \
diff --git a/images/installer/root/usr/local/bin/entrypoint-gcp b/images/installer/root/usr/local/bin/entrypoint-gcp
new file mode 100755
index 000000000..d0ffd9904
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint-gcp
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# This script sets up the user environment for running in GCP.
+# It configures a default dynamic inventory that works well when
+# run in a container. It assumes the user has provided a GCP
+# service account token and an ssh-privatekey file at
+# "$(pwd)/inventory/dynamic/injected" and automatically links any
+# YAML files found there into the group vars directory, which
+# makes the playbooks easier to run in containerized contexts.
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+ echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+ export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+ keyfile="${HOME}/.ssh/google_compute_engine"
+  mkdir -p "${HOME}/.ssh"
+ rm -f "${keyfile}"
+ cat "${FILES}/ssh-privatekey" > "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${keyfile}.pub"
+fi
+if [[ -f "${FILES}/gce.json" ]]; then
+ gcloud auth activate-service-account --key-file="${FILES}/gce.json"
+else
+ echo "No service account file found at ${FILES}/gce.json, bypassing login"
+fi
+
+exec "$@" \ No newline at end of file
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
index b76e60a4d..dba0af3e4 100755
--- a/images/installer/root/usr/local/bin/user_setup
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -12,6 +12,8 @@ chmod g+rw /etc/passwd
# ensure that the ansible content is accessible
chmod -R g+r ${WORK_DIR}
find ${WORK_DIR} -type d -exec chmod g+x {} +
+# ensure that content can be created in the dynamic inventory directories
+find ${WORK_DIR} -type d -exec chmod g+wx {} +
# no need for this script to remain in the image after running
rm $0
diff --git a/inventory/.gitignore b/inventory/.gitignore
index 6ff331c7e..97aa044f6 100644
--- a/inventory/.gitignore
+++ b/inventory/.gitignore
@@ -1 +1,2 @@
hosts
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml
\ No newline at end of file
diff --git a/inventory/dynamic/gcp/README.md b/inventory/dynamic/gcp/README.md
new file mode 100644
index 000000000..217a035ca
--- /dev/null
+++ b/inventory/dynamic/gcp/README.md
@@ -0,0 +1 @@
+This directory provides dynamic inventory for a GCP cluster configured via the GCP provisioning playbook. Set inventory to `inventory/dynamic/gcp/hosts.sh` to calculate the appropriate host set.
\ No newline at end of file
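A minimal sketch of what setting the inventory to hosts.sh looks like in practice, assuming the command is run from the repository root:

    ansible-playbook -i inventory/dynamic/gcp/hosts.sh \
      playbooks/gcp/openshift-cluster/install.yml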
diff --git a/inventory/dynamic/gcp/ansible.cfg b/inventory/dynamic/gcp/ansible.cfg
new file mode 100644
index 000000000..f87d51f28
--- /dev/null
+++ b/inventory/dynamic/gcp/ansible.cfg
@@ -0,0 +1,45 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/google_compute_engine
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = cloud-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Use the provided example inventory script
+inventory = hosts.sh
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/inventory/dynamic/gcp/group_vars/all/00_defaults.yml b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
new file mode 100644
index 000000000..2f72e905f
--- /dev/null
+++ b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
@@ -0,0 +1,42 @@
+# GCP images use a non-root user by default, so escalate with sudo by default
+---
+ansible_become: yes
+
+openshift_deployment_type: origin
+
+# Debugging settings
+debug_level: 2
+openshift_debug_level: "{{ debug_level }}"
+openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+
+# External API settings
+console_port: 443
+internal_console_port: 8443
+openshift_master_api_port: "8443"
+openshift_master_console_port: "8443"
+openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
+openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
+openshift_master_default_subdomain: "{{ wildcard_zone }}"
+
+# Cloud specific settings
+openshift_cloudprovider_kind: gce
+openshift_hosted_registry_storage_provider: gcs
+
+openshift_master_access_token_max_seconds: 2419200
+openshift_master_identity_providers:
+
+# Networking settings
+openshift_node_port_range: 30000-32000
+openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
+openshift_node_sdn_mtu: 1410
+osm_cluster_network_cidr: 172.16.0.0/16
+osm_host_subnet_length: 9
+openshift_portal_net: 172.30.0.0/16
+
+# Default cluster configuration
+openshift_master_cluster_method: native
+openshift_schedulable: true
+# TODO: change to upstream conventions
+openshift_hosted_infra_selector: "role=infra"
+osm_default_node_selector: "role=app"
diff --git a/inventory/dynamic/gcp/hosts.py b/inventory/dynamic/gcp/hosts.py
new file mode 100755
index 000000000..cd1262622
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a derivative of gce.py that adds support for filtering
+# the returned inventory to only include instances that have tags
+# as specified by GCE_TAGGED_INSTANCES. This prevents dynamic
+# inventory for multiple clusters within the same project from
+# accidentally stomping each other.
+
+# pylint: skip-file
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+ used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+ your instance was created with a root persistent disk it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>
+Version: 0.0.2
+'''
+
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION="v2"
+
+import sys
+import os
+import time
+import argparse
+import ConfigParser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+except:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(self.node_to_dict(
+ self.get_instance(self.args.host)),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ zones = self.parse_env_zones()
+
+ # Otherwise, assume user wants all instances grouped
+ print(self.json_format_dict(self.group_instances(zones),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = ConfigParser.SafeConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+ if not secrets_found:
+ args = [
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
+ # Retrieve and return the GCE driver.
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ return gce
+
+ def parse_env_zones(self):
+        '''returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this will be used to filter the results of the group_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--tagged', action='store',
+ help='Only include instances with this tag')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ self.args = parser.parse_args()
+
+ tag_env = os.environ.get('GCE_TAGGED_INSTANCES')
+ if not self.args.tagged and tag_env:
+ self.args.tagged = tag_env
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if inst.extra['metadata'].has_key('items'):
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to external IP unless user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_host': ssh_host
+ }
+
+ def get_instance(self, instance_name):
+ '''Gets details about a specific instance '''
+ try:
+ return self.driver.ex_get_node(instance_name)
+ except Exception as e:
+ return None
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ # list_nodes will fail if a disk is in the process of being deleted
+ # from a node, which is not uncommon if other playbooks are managing
+ # the same project. Retry if we receive a not found error.
+ nodes = []
+ tries = 0
+ while True:
+ try:
+ nodes = self.driver.list_nodes()
+ break
+ except ResourceNotFoundError:
+ tries = tries + 1
+ if tries > 15:
+                    raise
+ time.sleep(1)
+ continue
+
+ for node in nodes:
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
+ name = node.name
+
+ if self.args.tagged and self.args.tagged not in node.extra['tags']:
+ continue
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if groups.has_key(zone): groups[zone].append(name)
+ else: groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if groups.has_key(tag): groups[tag].append(name)
+ else: groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if groups.has_key(net): groups[net].append(name)
+ else: groups[net] = [name]
+
+ machine_type = node.size
+ if groups.has_key(machine_type): groups[machine_type].append(name)
+ else: groups[machine_type] = [name]
+
+ image = node.image and node.image or 'persistent_disk'
+ if groups.has_key(image): groups[image].append(name)
+ else: groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if groups.has_key(stat): groups[stat].append(name)
+ else: groups[stat] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+GceInventory()
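A minimal usage sketch for the script, assuming a gce.ini or the equivalent environment variables are already in place; the tag and instance name are placeholders:

    # Restrict inventory to instances carrying the cluster tag, pretty-printed.
    GCE_TAGGED_INSTANCES=mycluster ./hosts.py --list --pretty
    # Show the hostvars for one instance.
    ./hosts.py --host my-instance-name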
diff --git a/inventory/dynamic/gcp/hosts.sh b/inventory/dynamic/gcp/hosts.sh
new file mode 100755
index 000000000..0c88e3a6b
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Use a playbook to calculate the inventory dynamically from
+# the provided cluster variables.
+src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+ echo "error: Inventory configuration failed" 1>&2
+ echo "$out" 1>&2
+ echo "{}"
+ exit 1
+fi
+source "/tmp/inventory.sh"
+exec "${src}/hosts.py"
diff --git a/inventory/dynamic/gcp/none b/inventory/dynamic/gcp/none
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/inventory/dynamic/gcp/none
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/inventory/dynamic/injected/README.md b/inventory/dynamic/injected/README.md
new file mode 100644
index 000000000..5e2e4c549
--- /dev/null
+++ b/inventory/dynamic/injected/README.md
@@ -0,0 +1,3 @@
+This directory may be used to inject inventory into openshift-ansible
+when used in a container. Other scripts like the cloud provider entrypoints
+will automatically use the content of this directory as inventory.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c09e14c66..719e54eb9 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -48,7 +48,8 @@ popd
%install
# Base openshift-ansible install
mkdir -p %{buildroot}%{_datadir}/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory
# openshift-ansible-bin install
mkdir -p %{buildroot}%{_bindir}
@@ -62,7 +63,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/* docs/example-inventories/
+cp inventory/hosts.* inventory/README.md docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -101,6 +102,7 @@ popd
%license LICENSE
%dir %{_datadir}/ansible/%{name}
%{_datadir}/ansible/%{name}/files
+%{_datadir}/ansible/%{name}/inventory/dynamic
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
# ----------------------------------------------------------------------------------
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
- name: run the init
import_playbook: ../../init/main.yml
-- name: perform the installer openshift-checks
- import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
-- name: etcd install
- import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
- import_playbook: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
- import_playbook: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
- import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
- import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
- import_playbook: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
- name: Include the accept.yml playbook to accept nodes into the cluster
import_playbook: accept.yml
-- name: Include the hosted.yml playbook to finish the hosted configuration
- import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+# perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+# include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# No nodes need to be configured, or passed in for configuration, when this
+# playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from outside the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..361553ee4 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,44 +1,11 @@
---
- import_playbook: init/main.yml
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
- import_playbook: openshift-node/private/config.yml
-- import_playbook: openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
+- import_playbook: common/private/components.yml
- name: Print deprecated variable warning message if necessary
hosts: oo_first_master
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..75d0ddf9d
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,162 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_root_image
+ fail:
+ msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+ when: openshift_gcp_root_image is undefined
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_root_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: build_instance_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+ pre_tasks:
+ - set_fact:
+ allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+ - set_fact:
+ using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+ hosts: build_instance_ips
+ roles:
+ - role: rhel_subscribe
+ when: using_rhel_subscriptions
+ - role: openshift_repos
+ vars:
+ openshift_additional_repos: []
+ post_tasks:
+ - name: Add custom repositories
+ include_role:
+ name: openshift_gcp
+ tasks_from: add_custom_repositories.yml
+ - name: Add the Google Cloud repo
+ yum_repository:
+ name: google-cloud
+ description: Google Cloud Compute
+ baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ gpgcheck: yes
+ repo_gpgcheck: yes
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Add the jdetiber-qemu-user-static copr repo
+ yum_repository:
+ name: jdetiber-qemu-user-static
+ description: QEMU user static COPR
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+ gpgcheck: yes
+ repo_gpgcheck: no
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Install qemu-user-static
+ package:
+ name: qemu-user-static
+ state: present
+ - name: Start and enable systemd-binfmt service
+ systemd:
+ name: systemd-binfmt
+ state: started
+ enabled: yes
+
+- name: Build image
+ hosts: build_instance_ips
+ pre_tasks:
+ - name: Set up core host GCP configuration
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_gcp_base_image.yml
+ roles:
+ - role: os_update_latest
+ post_tasks:
+ - name: Disable all repos on RHEL
+ command: subscription-manager repos --disable="*"
+ when: using_rhel_subscriptions
+ - name: Enable repos for packages on RHEL
+ command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+ when: using_rhel_subscriptions
+ - name: Install common image prerequisites
+ package: name={{ item }} state=latest
+ with_items:
+ # required by Ansible
+ - PyYAML
+ - docker
+ - google-compute-engine
+ - google-compute-engine-init
+ - google-config
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - iptables-services
+ - bridge-utils
+ - bash-completion
+ - name: Clean yum metadata
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project }}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
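A minimal invocation sketch for the base-image build, with placeholder values for the variables the plays reference:

    ansible-playbook playbooks/gcp/openshift-cluster/build_base_image.yml \
      -e openshift_gcp_project=my-project \
      -e openshift_gcp_zone=us-central1-a \
      -e openshift_gcp_prefix=dev- \
      -e openshift_gcp_root_image=rhel-7 \
      -e openshift_gcp_iam_service_account_keyfile=/path/to/gce.json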
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_base_image
+ fail:
+ msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+ when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+ openshift_master_unsupported_embedded_etcd: True
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_base_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - name: add host to nodes
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: nodes
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ tasks_from: node_cloud_config.yml
+ - include_role:
+ name: openshift_gcp
+ tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project }}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
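The node-image build follows the same pattern but starts from the base image family; a sketch with placeholder values:

    ansible-playbook playbooks/gcp/openshift-cluster/build_image.yml \
      -e openshift_gcp_base_image=my-base-image-family \
      -e openshift_gcp_image=my-node-image-family \
      -e openshift_gcp_iam_service_account_keyfile=/path/to/gce.json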
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+ hosts: localhost
+ connection: local
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ vars:
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: place all scale groups into Ansible groups
+ include_role:
+ name: openshift_gcp
+ tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+ import_playbook: install_gcp.yml
+
+- name: install components
+ import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+ gather_facts: no
+ tasks:
+ - name: Retrieve cluster configuration
+ fetch:
+ src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ dest: "/tmp/"
+ flat: yes
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+ gather_facts: no
+ tasks:
+ - name: create master health check service
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_healthcheck.yml
+ - name: configure node bootstrapping
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_bootstrap.yml
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - name: configure node bootstrap autoapprover
+ include_role:
+ name: openshift_bootstrap_autoapprover
+ tasks_from: main
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - openshift_master_bootstrap_auto_approve | default(False) | bool
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: materialize the inventory
+ include_role:
+ name: openshift_gcp
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+ when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+ tasks:
+ - meta: refresh_inventory
+
+- import_playbook: install.yml
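A sketch of driving the full launch flow; openshift_gcp_build_image is the gate shown above, and the remaining cluster variables are assumed to come from group vars or the injected inventory:

    ansible-playbook playbooks/gcp/openshift-cluster/launch.yml \
      -e openshift_gcp_build_image=true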
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
hosts: localhost
connection: local
gather_facts: no
+ roles:
+ - openshift_gcp
tasks:
-
- - name: provision a GCP cluster in the specified project
+ - name: recalculate the dynamic inventory
import_role:
name: openshift_gcp
-
-- name: run the cluster deploy
- import_playbook: ../deploy_cluster.yml
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: openshift_gcp
+ tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
new file mode 100644
index 000000000..90ee40943
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
@@ -0,0 +1,10 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+roleRef:
+ kind: ClusterRole
+ name: system:node-bootstrap-autoapprover
+subjects:
+- kind: User
+ name: system:serviceaccount:openshift-infra:bootstrap-autoapprover
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
new file mode 100644
index 000000000..d8143d047
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
@@ -0,0 +1,21 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: system:node-bootstrap-autoapprover
+rules:
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests
+ verbs:
+ - delete
+ - get
+ - list
+ - watch
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests/approval
+ verbs:
+ - create
+ - update
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
new file mode 100644
index 000000000..e22ce6f34
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
@@ -0,0 +1,5 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
new file mode 100644
index 000000000..dbcedb407
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
@@ -0,0 +1,68 @@
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: bootstrap-autoapprover
+ spec:
+ serviceAccountName: bootstrap-autoapprover
+ terminationGracePeriodSeconds: 1
+ containers:
+ - name: signer
+ image: openshift/node:v3.7.0-rc.0
+ command:
+ - /bin/bash
+ - -c
+ args:
+ - |
+ #!/bin/bash
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ unset KUBECONFIG
+ cat <<SCRIPT > /tmp/signer
+ #!/bin/bash
+ #
+ # It will approve any CSR that is not approved yet, and delete any CSR that expired more than 60 seconds
+ # ago.
+ #
+
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ name=\${1}
+ condition=\${2}
+ certificate=\${3}
+ username=\${4}
+
+ # auto approve
+ if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then
+ oc adm certificate approve "\${name}"
+ exit 0
+ fi
+
+ # check certificate age
+ if [[ -n "\${certificate}" ]]; then
+ text="\$( echo "\${certificate}" | base64 -d - )"
+ if ! echo "\${text}" | openssl x509 -noout; then
+          echo "error: Unable to parse certificate" 1>&2
+ exit 1
+ fi
+ if ! echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then
+ echo "Certificate is expired, deleting"
+ oc delete csr "\${name}"
+ fi
+ exit 0
+ fi
+ SCRIPT
+ chmod u+x /tmp/signer
+
+ exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer
diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
new file mode 100644
index 000000000..88e9d08e7
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Copy auto-approver config to host
+ run_once: true
+ copy:
+ src: "{{ item }}"
+ dest: /tmp/openshift-approver/
+ owner: root
+ mode: 0400
+ with_fileglob:
+ - "*.yaml"
+
+- name: Set auto-approver nodeSelector
+ run_once: true
+ yedit:
+ src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml"
+ key: spec.template.spec.nodeSelector
+ value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}"
+ value_type: list
+
+- name: Create auto-approver on cluster
+ run_once: true
+ command: oc apply -f /tmp/openshift-approver/
+
+- name: Remove auto-approver config
+ run_once: true
+ file:
+ path: /tmp/openshift-approver/
+ state: absent
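A quick verification sketch once the role has applied these manifests, assuming a cluster-admin kubeconfig is active:

    oc get statefulset bootstrap-autoapprover -n openshift-infra
    # New node CSRs should transition to Approved without manual intervention.
    oc get csr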
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index ee4048911..395bd304c 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -13,5 +13,11 @@
ini_file:
dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf"
section: Global
- option: multizone
- value: "true"
+ option: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
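For reference, a sketch of the [Global] section this task would now render into gce.conf, using placeholder values (with prefix "dev-", node-tags becomes "dev-ocp"):

    cat <<'EOF'
    [Global]
    project-id = my-project
    network-name = my-network
    node-tags = dev-ocp
    node-instance-prefix = dev-
    multizone = false
    EOF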
diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh
new file mode 100644
index 000000000..0c3f1999b
--- /dev/null
+++ b/roles/openshift_gcp/files/bootstrap-script.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# This script is a startup script for bootstrapping a GCP node
+# from a config stored in the project metadata. It loops until
+# it finds the config and then starts the origin-node service.
+# TODO: generalize
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then
+ echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2
+ exit 0
+fi
+
+if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get node-group for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ while true; do
+ if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ break
+ fi
+ echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 1>&2
+ sleep 5
+ done
+fi
+
+echo "Got bootstrap config from metadata"
+mkdir -p /etc/origin/node
+echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig
+echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node
+systemctl enable origin-node
+systemctl start origin-node
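A hedged sketch of how the matching instance metadata might be set (hypothetical instance, zone, and cluster names; the provision templates in this change are the authoritative source):

    gcloud compute instances add-metadata mycluster-node-abcd \
        --zone us-central1-a \
        --metadata bootstrap=true,cluster-id=mycluster,node-group=compute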
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service
new file mode 100644
index 000000000..c65b1b34e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service
@@ -0,0 +1,7 @@
+[Unit]
+Description=Update the OpenShift node bootstrap configuration
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/openshift-bootstrap-update
+User=root
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
new file mode 100644
index 000000000..1a517b33e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Update the OpenShift node bootstrap credentials hourly
+
+[Timer]
+OnBootSec=30s
+OnCalendar=hourly
+Persistent=true
+
+[Install]
+WantedBy=timers.target
\ No newline at end of file
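The role only starts the timer, but it can also be enabled and inspected by hand with standard systemctl commands:

    systemctl daemon-reload
    systemctl enable openshift-bootstrap-update.timer
    systemctl start openshift-bootstrap-update.timer
    systemctl list-timers openshift-bootstrap-update.timer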
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf
index b87e5e0b6..76e65ab9c 100644
--- a/roles/openshift_gcp_image_prep/files/partition.conf
+++ b/roles/openshift_gcp/files/partition.conf
@@ -1,3 +1,3 @@
[Service]
ExecStartPost=-/usr/bin/growpart /dev/sda 1
-ExecStartPost=-/sbin/xfs_growfs /
+ExecStartPost=-/sbin/xfs_growfs /
\ No newline at end of file
diff --git a/roles/openshift_gcp/meta/main.yml b/roles/openshift_gcp/meta/main.yml
new file mode 100644
index 000000000..5e428f8de
--- /dev/null
+++ b/roles/openshift_gcp/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Clayton Coleman
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml
new file mode 100644
index 000000000..04718f78e
--- /dev/null
+++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy custom repository client certificates
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
+ when: item.1.sslclientcert | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+- name: Copy custom repository client keys
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
+ when: item.1.sslclientkey | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+
+- name: Create any custom repos that are defined
+ template:
+ src: yum_repo.j2
+ dest: /etc/yum.repos.d/provision_custom_repositories.repo
+ when: provision_custom_repositories | length > 0
+ notify: refresh cache
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
index fee5ab618..2c6e2790a 100644
--- a/roles/openshift_gcp_image_prep/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
@@ -1,18 +1,10 @@
----
# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+---
- name: Remove barrier=1 from XFS fstab entries
- lineinfile:
- path: /etc/fstab
- regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
- line: '\1xfs\2 \4'
- backrefs: yes
+ command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
- name: Ensure the root filesystem has XFS group quota turned on
- lineinfile:
- path: /boot/grub2/grub.cfg
- regexp: '^(.*)linux16 (.*)$'
- line: '\1linux16 \2 rootflags=gquota'
- backrefs: yes
+ command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
- name: Ensure the root partition grows on startup
copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
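The fstab rewrite can be sanity-checked in isolation; assuming a typical entry:

    # the first expression drops barrier=1 after the xfs type, the second
    # collapses the ", " it leaves behind in the mount options
    echo 'UUID=abcd / xfs defaults,barrier=1 0 0' \
        | sed -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g'
    # -> UUID=abcd / xfs defaults 0 0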
diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
new file mode 100644
index 000000000..591cb593c
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
@@ -0,0 +1,36 @@
+#
+# These tasks configure the instance to periodically push the latest bootstrap kubeconfig,
+# generated from the cluster, up to the project metadata. This keeps the project metadata
+# in sync with the cluster's configuration. We then invoke a CSR approve on any nodes that
+# are waiting to join the cluster.
+#
+---
+- name: Copy bootstrap update timer unit
+ copy:
+ src: openshift-bootstrap-update.timer
+ dest: /etc/systemd/system/openshift-bootstrap-update.timer
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Copy bootstrap update service unit
+ copy:
+ src: openshift-bootstrap-update.service
+ dest: /etc/systemd/system/openshift-bootstrap-update.service
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Create bootstrap update script
+ template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
+
+- name: Start bootstrap update timer
+ systemd:
+ name: "openshift-bootstrap-update.timer"
+ state: started
+
+- name: Bootstrap all nodes that were identified with bootstrap metadata
+ run_once: true
+ oc_adm_csr:
+ nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
+ timeout: 60
diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
new file mode 100644
index 000000000..aa9655977
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
@@ -0,0 +1,19 @@
+---
+- name: refresh yum cache
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: install haproxy
+ package: name=haproxy state=present
+ register: result
+ until: '"failed" not in result'
+ retries: 10
+ delay: 10
+
+- name: configure haproxy
+ template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg
+
+- name: start and enable haproxy service
+ service: name=haproxy state=started enabled=yes
diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml
new file mode 100644
index 000000000..1637da945
--- /dev/null
+++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml
@@ -0,0 +1,5 @@
+---
+- name: Extract PEM from service account file
+ copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+- name: Templatize environment script
+ template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
new file mode 100644
index 000000000..0b4b27f84
--- /dev/null
+++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
@@ -0,0 +1,18 @@
+---
+- name: Rotate logs daily
+ replace:
+ dest: /etc/logrotate.conf
+ regexp: '^(weekly|monthly|yearly)$'
+ replace: daily
+- name: Rotate logs at a smaller size
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^size'
+ line: size 10M
+- name: Limit total size of log files
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^maxsize'
+ line: maxsize 20M
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml
index ad205ba33..fb147bc78 100644
--- a/roles/openshift_gcp/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/main.yml
@@ -17,7 +17,7 @@
- name: Provision GCP DNS domain
command: /tmp/openshift_gcp_provision_dns.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
register: dns_provision
when:
- state | default('present') == 'present'
@@ -33,7 +33,7 @@
- name: Provision GCP resources
command: /tmp/openshift_gcp_provision.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
when:
- state | default('present') == 'present'
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml b/roles/openshift_gcp/tasks/node_cloud_config.yml
new file mode 100644
index 000000000..4e982f497
--- /dev/null
+++ b/roles/openshift_gcp/tasks/node_cloud_config.yml
@@ -0,0 +1,12 @@
+---
+- name: ensure the /etc/origin folder exists
+ file: name=/etc/origin state=directory
+
+- name: configure gce cloud config options
+ ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml
new file mode 100644
index 000000000..db8a7ca69
--- /dev/null
+++ b/roles/openshift_gcp/tasks/publish_image.yml
@@ -0,0 +1,32 @@
+---
+- name: Require openshift_gcp_image
+ fail:
+ msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined."
+ when: openshift_gcp_image is undefined
+
+- name: Require openshift_gcp_target_image
+ fail:
+ msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined."
+ when: openshift_gcp_target_image is undefined
+
+- block:
+ - name: Retrieve images in the {{ openshift_gcp_target_image }} family
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images list
+ "--filter=family={{ openshift_gcp_target_image }}"
+ --format=json --sort-by ~creationTimestamp
+ register: images
+ - name: Prune oldest images
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
+ with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
+ when: openshift_gcp_keep_images is defined
+
+- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
+ command: >
+ gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
+ beta compute images create
+ "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
+ --family "{{ openshift_gcp_target_image }}"
+ --source-image-family "{{ openshift_gcp_image }}"
+ --source-image-project "{{ openshift_gcp_project }}"
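With hypothetical values (source family mycluster-base-candidate in build-project, target family mycluster-base, and a date-stamped name from the lookup above), the copy step expands to roughly:

    gcloud --project build-project beta compute images create \
        "mycluster-base-20180301-120000" \
        --family "mycluster-base" \
        --source-image-family "mycluster-base-candidate" \
        --source-image-project "build-project"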
diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
new file mode 100644
index 000000000..0fda43123
--- /dev/null
+++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
@@ -0,0 +1,44 @@
+---
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: masters, etcd
+ with_items: "{{ groups['tag_ocp-master'] }}"
+
+- name: Add a master to the primary masters group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: primary_master
+ with_items: "{{ groups['tag_ocp-master'].0 }}"
+
+- name: Add non-bootstrapping master node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add infra node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: app
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: bootstrap_nodes
+ openshift_node_bootstrap: True
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+ when: not (openshift_node_bootstrap | default(False))
diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh
new file mode 100644
index 000000000..dcaffb578
--- /dev/null
+++ b/roles/openshift_gcp/templates/inventory.j2.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export GCE_PROJECT="{{ openshift_gcp_project }}"
+export GCE_ZONE="{{ openshift_gcp_zone }}"
+export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+export GCE_PEM_FILE_PATH="/tmp/gce.pem"
+export INVENTORY_IP_TYPE="{{ inventory_ip_type }}"
+export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp"
\ No newline at end of file
diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2
new file mode 100644
index 000000000..189e578c5
--- /dev/null
+++ b/roles/openshift_gcp/templates/master_healthcheck.j2
@@ -0,0 +1,68 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application. See the
+# full configuration options online.
+#
+# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+ # to have these messages end up in /var/log/haproxy.log you will
+ # need to:
+ #
+ # 1) configure syslog to accept network log events. This is done
+ # by adding the '-r' option to the SYSLOGD_OPTIONS in
+ # /etc/sysconfig/syslog
+ #
+ # 2) configure local2 events to go to the /var/log/haproxy.log
+ # file. A line like the following can be added to
+ # /etc/sysconfig/syslog
+ #
+ # local2.* /var/log/haproxy.log
+ #
+ log 127.0.0.1 local2
+
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxys to the backends
+#---------------------------------------------------------------------
+frontend http-proxy *:8080
+ acl url_healthz path_beg -i /healthz
+ use_backend ocp if url_healthz
+
+backend ocp
+ server ocp localhost:{{ internal_console_port }} ssl verify none
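Once haproxy is running, the health endpoint can be probed on the plain-HTTP frontend port (8080 here); the backend forwards to the master's TLS console port without certificate verification:

    # expect the master's healthz response from a healthy master
    curl -s http://localhost:8080/healthz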
diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
new file mode 100644
index 000000000..5b0563724
--- /dev/null
+++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -euo pipefail
+
+oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
+gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
+rm -f /root/bootstrap.kubeconfig
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 4d150bc74..794985322 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -9,15 +9,26 @@ if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
fi
- # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
- pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ # Check if the public key is in the project metadata, and if not, add it there
+ if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then
+ pub_file="{{ openshift_gcp_ssh_private_key }}.pub"
+ pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ else
+ keyfile="${HOME}/.ssh/google_compute_engine"
+ pub_file="${keyfile}.pub"
+ mkdir -p "${HOME}/.ssh"
+ cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${pub_file}"
+ pub_key=$(cut -d ' ' -f 2 < "${pub_file}")
+ fi
key_tmp_file='/tmp/ocp-gce-keys'
if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
fi
echo -n 'cloud-user:' >> "$key_tmp_file"
- cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
+ cat "${pub_file}" >> "$key_tmp_file"
gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
rm -f "$key_tmp_file"
fi
diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2
new file mode 100644
index 000000000..77919ea75
--- /dev/null
+++ b/roles/openshift_gcp/templates/yum_repo.j2
@@ -0,0 +1,20 @@
+{% for repo in provision_custom_repositories %}
+[{{ repo.id | default(repo.name) }}]
+name={{ repo.name | default(repo.id) }}
+baseurl={{ repo.baseurl }}
+{% set enable_repo = repo.enabled | default(1) %}
+enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
+gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
+{% if 'sslclientcert' in repo %}
+sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }}
+{% endif %}
+{% if 'sslclientkey' in repo %}
+sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }}
+{% endif %}
+{% for key, value in repo.items() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endfor %}
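As a sketch, a hypothetical provision_custom_repositories entry (id custom-origin, gpgcheck disabled, client cert and key supplied) would render to roughly the following repo file; shown as a heredoc for concreteness:

    cat <<'EOF' > /etc/yum.repos.d/provision_custom_repositories.repo
    [custom-origin]
    name=custom-origin
    baseurl=https://mirror.example.com/origin
    enabled=1
    gpgcheck=0
    sslclientcert=/var/lib/yum/custom_secret_0_cert
    sslclientkey=/var/lib/yum/custom_secret_0_key
    EOF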
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index 41da2c16a..967222ea4 100644
--- a/roles/openshift_web_console/tasks/update_console_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -55,6 +55,7 @@
state: present
from_file:
webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ register: update_console_config_map
- name: Remove temp directory
file:
@@ -62,5 +63,5 @@
name: "{{ mktemp_console.stdout }}"
changed_when: False
- # TODO: Only rollout if config has changed.
- include_tasks: rollout_console.yml
+ when: update_console_config_map.changed | bool