Diffstat (limited to 'utils/src')
-rw-r--r--  utils/src/DESCRIPTION.rst                                13
-rw-r--r--  utils/src/MANIFEST.in                                     9
-rw-r--r--  utils/src/data/data_file                                  1
-rw-r--r--  utils/src/ooinstall/__init__.py                           5
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py    88
-rw-r--r--  utils/src/ooinstall/cli_installer.py                    913
-rw-r--r--  utils/src/ooinstall/oo_config.py                        265
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py                273
-rw-r--r--  utils/src/ooinstall/variants.py                          77
9 files changed, 1644 insertions, 0 deletions
diff --git a/utils/src/DESCRIPTION.rst b/utils/src/DESCRIPTION.rst
new file mode 100644
index 000000000..68b3a57f2
--- /dev/null
+++ b/utils/src/DESCRIPTION.rst
@@ -0,0 +1,13 @@
+A sample Python project
+=======================
+
+This is the description file for the project.
+
+The file should use UTF-8 encoding and be written using reStructuredText. It
+will be used to generate the project webpage on PyPI, and should be written for
+that purpose.
+
+Typical contents for this file would include an overview of the project, basic
+usage examples, etc. Generally, including the project changelog in here is not
+a good idea, although a simple "What's New" section for the most recent version
+may be appropriate.
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
new file mode 100644
index 000000000..d4153e738
--- /dev/null
+++ b/utils/src/MANIFEST.in
@@ -0,0 +1,9 @@
+include DESCRIPTION.rst
+
+# Include the test suite (FIXME: does not work yet)
+# recursive-include tests *
+
+# If using Python 2.6 or earlier, package data has to be included here, even
+# though it's already declared in setup.py
+include ooinstall/*
+include ansible.cfg
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
new file mode 100644
index 000000000..7c0646bfd
--- /dev/null
+++ b/utils/src/data/data_file
@@ -0,0 +1 @@
+some data
\ No newline at end of file
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
new file mode 100644
index 000000000..944dea3b5
--- /dev/null
+++ b/utils/src/ooinstall/__init__.py
@@ -0,0 +1,5 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=missing-docstring
+
+from .oo_config import OOConfig
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
new file mode 100644
index 000000000..ea6ed6574
--- /dev/null
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -0,0 +1,88 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import os
+import yaml
+
+class CallbackModule(object):
+
+ def __init__(self):
+ ######################
+        # This is ugly and should be updated: it should probably only be used
+        # for the openshift_facts.yml playbook, so maybe there's some way to
+        # check a variable that's set when that playbook is run?
+ try:
+ self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
+ except KeyError:
+ raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
+ 'variable must be set.')
+ self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
+ os.O_WRONLY)
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
+ if res['invocation']['module_args'] == 'var=result':
+ facts = res['var']['result']['ansible_facts']['openshift']
+ hosts_yaml = {}
+ hosts_yaml[host] = facts
+ os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
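+            # For illustration (hypothetical host and values), each write
+            # appends a YAML mapping like:
+            #   master1.example.com:
+            #     common: {ip: 10.0.0.1, hostname: master1.example.com, ...}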
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res):
+ pass
+
+ def runner_on_async_ok(self, host, res):
+ pass
+
+ def runner_on_async_failed(self, host, res):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ #pylint: disable=too-many-arguments
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
new file mode 100644
index 000000000..c53ca7b18
--- /dev/null
+++ b/utils/src/ooinstall/cli_installer.py
@@ -0,0 +1,913 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import click
+import os
+import re
+import sys
+from ooinstall import openshift_ansible
+from ooinstall import OOConfig
+from ooinstall.oo_config import OOConfigInvalidHostError
+from ooinstall.oo_config import Host
+from ooinstall.variants import find_variant, get_variant_version_combos
+
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
+DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
+
+def validate_ansible_dir(path):
+ if not path:
+ raise click.BadParameter('An ansible path must be provided')
+ return path
+    # if not os.path.exists(path):
+    #     raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
+
+def is_valid_hostname(hostname):
+ if not hostname or len(hostname) > 255:
+ return False
+ if hostname[-1] == ".":
+ hostname = hostname[:-1] # strip exactly one dot from the right, if present
+ allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
+ return all(allowed.match(x) for x in hostname.split("."))
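+# Illustrative behaviour of is_valid_hostname (hypothetical values): it accepts
+# RFC-1123 style names and rejects empty, over-long, or malformed ones, e.g.
+#   is_valid_hostname('master1.example.com')  ->  True
+#   is_valid_hostname('-bad.example.com')     ->  False (label starts with '-')
+#   is_valid_hostname('a' * 256)              ->  False (longer than 255 chars)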
+
+def validate_prompt_hostname(hostname):
+ if '' == hostname or is_valid_hostname(hostname):
+ return hostname
+ raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
+
+def get_ansible_ssh_user():
+ click.clear()
+ message = """
+This installation process will involve connecting to remote hosts via ssh. Any
+account may be used; however, if a non-root account is used, it must have
+passwordless sudo access.
+"""
+ click.echo(message)
+ return click.prompt('User for ssh access', default='root')
+
+def get_master_routingconfig_subdomain():
+ click.clear()
+ message = """
+You might want to override the default subdomain used for exposed routes. If you don't know what
+this is, use the default value.
+"""
+ click.echo(message)
+ return click.prompt('New default subdomain (ENTER for none)', default='')
+
+def list_hosts(hosts):
+ hosts_idx = range(len(hosts))
+ for idx in hosts_idx:
+ click.echo(' {}: {}'.format(idx, hosts[idx]))
+
+def delete_hosts(hosts):
+ while True:
+ list_hosts(hosts)
+ del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
+ 'or n/N to add more hosts', default='n')
+ try:
+ del_idx = int(del_idx)
+ hosts.remove(hosts[del_idx])
+ except IndexError:
+ click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
+ except ValueError:
+ try:
+ response = del_idx.lower()
+ if response in ['y', 'n']:
+ return hosts, response
+                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
+            except AttributeError:
+                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
+ return hosts, None
+
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
+ """
+ Collect host information from user. This will later be filled in using
+ ansible.
+
+ Returns: a list of host information collected from the user
+ """
+ click.clear()
+ click.echo('*** Host Configuration ***')
+ message = """
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP or hostname to connect to for each system in the cluster.
+You will then be prompted to identify what role you would like this system to
+serve in the cluster.
+
+OpenShift Masters serve the API and web console and coordinate the jobs to run
+across the environment. If desired you can specify multiple Master systems for
+an HA deployment, in which case you will be prompted to identify a *separate*
+system to act as the load balancer for your cluster after all Masters and Nodes
+are defined.
+
+If only one Master is specified, an etcd instance embedded within the OpenShift
+Master service will be used as the datastore. This can be later replaced with a
+separate etcd instance if desired. If multiple Masters are specified, a
+separate etcd cluster will be configured with each Master serving as a member.
+
+Any Masters configured as part of this installation process will also be
+configured as Nodes. This is so that the Master will be able to proxy to Pods
+from the API. By default this Node will be unschedulable but this can be changed
+after installation with 'oadm manage-node'.
+
+OpenShift Nodes provide the runtime environments for containers. They will
+host the required services to be managed by the Master.
+
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
+ """
+ click.echo(message)
+
+ hosts = []
+ more_hosts = True
+ num_masters = 0
+ while more_hosts:
+ host_props = {}
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_hostname)
+
+ if not masters_set:
+ if click.confirm('Will this host be an OpenShift Master?'):
+ host_props['master'] = True
+ num_masters += 1
+
+ if oo_cfg.settings['variant_version'] == '3.0':
+ masters_set = True
+ host_props['node'] = True
+
+ host_props['containerized'] = False
+ if oo_cfg.settings['variant_version'] != '3.0':
+ rpm_or_container = \
+ click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+
+ if existing_env:
+ host_props['new_host'] = True
+ else:
+ host_props['new_host'] = False
+
+ host = Host(**host_props)
+
+ hosts.append(host)
+
+ if print_summary:
+ print_installation_summary(hosts, oo_cfg.settings['variant_version'])
+
+        # One master is enough for an all-in-one deployment, so we can start
+        # asking whether to proceed. With exactly two masters entered we keep
+        # prompting for hosts, since at least a third master is required for HA.
+ if masters_set or num_masters != 2:
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+
+ if num_masters == 1:
+ master = next((host for host in hosts if host.master), None)
+ master.storage = True
+ elif num_masters >= 3:
+ collect_master_lb(hosts)
+ collect_storage_host(hosts)
+
+ return hosts
+
+
+def print_installation_summary(hosts, version=None):
+ """
+ Displays a summary of all hosts configured thus far, and what role each
+ will play.
+
+ Shows total nodes/masters, hints for performing/modifying the deployment
+ with additional setup, warnings for invalid or sub-optimal configurations.
+ """
+ click.clear()
+ click.echo('*** Installation Summary ***\n')
+ click.echo('Hosts:')
+ for host in hosts:
+ print_host_summary(hosts, host)
+
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ dedicated_nodes = [host for host in hosts if host.node and not host.master]
+ click.echo('')
+ click.echo('Total OpenShift Masters: %s' % len(masters))
+ click.echo('Total OpenShift Nodes: %s' % len(nodes))
+
+ if len(masters) == 1 and version != '3.0':
+ ha_hint_message = """
+NOTE: Add a total of 3 or more Masters to perform an HA installation."""
+ click.echo(ha_hint_message)
+ elif len(masters) == 2:
+ min_masters_message = """
+WARNING: A minimum of 3 masters is required to perform an HA installation.
+Please add one more to proceed."""
+ click.echo(min_masters_message)
+ elif len(masters) >= 3:
+ ha_message = """
+NOTE: Multiple Masters specified, this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer and
+a host for storage once finished entering hosts.
+"""
+ click.echo(ha_message)
+
+ dedicated_nodes_message = """
+WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
+Nodes are specified, each configured Master will be marked as a schedulable
+Node."""
+
+ min_ha_nodes_message = """
+WARNING: A minimum of 3 dedicated Nodes is recommended for an HA
+deployment."""
+ if len(dedicated_nodes) == 0:
+ click.echo(dedicated_nodes_message)
+ elif len(dedicated_nodes) < 3:
+ click.echo(min_ha_nodes_message)
+
+ click.echo('')
+
+
+def print_host_summary(all_hosts, host):
+ click.echo("- %s" % host.connect_to)
+ if host.master:
+ click.echo(" - OpenShift Master")
+ if host.node:
+ if host.is_dedicated_node():
+ click.echo(" - OpenShift Node (Dedicated)")
+ elif host.is_schedulable_node(all_hosts):
+ click.echo(" - OpenShift Node")
+ else:
+ click.echo(" - OpenShift Node (Unscheduled)")
+ if host.master_lb:
+ if host.preconfigured:
+ click.echo(" - Load Balancer (Preconfigured)")
+ else:
+ click.echo(" - Load Balancer (HAProxy)")
+ if host.master:
+ if host.is_etcd_member(all_hosts):
+ click.echo(" - Etcd Member")
+ else:
+ click.echo(" - Etcd (Embedded)")
+ if host.storage:
+ click.echo(" - Storage")
+
+
+def collect_master_lb(hosts):
+ """
+ Get a valid load balancer from the user and append it to the list of
+ hosts.
+
+ Ensure user does not specify a system already used as a master/node as
+ this is an invalid configuration.
+ """
+ message = """
+Setting up High Availability Masters requires a load balancing solution.
+Please provide the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
+
+If the host provided is not yet configured, a reference HAProxy load
+balancer will be installed. It's important to note that while the rest of the
+environment will be fault tolerant, this reference load balancer will not be.
+It can be replaced post-installation with a load balancer with the same
+hostname.
+"""
+ click.echo(message)
+ host_props = {}
+
+ # Using an embedded function here so we have access to the hosts list:
+ def validate_prompt_lb(hostname):
+ # Run the standard hostname check first:
+ hostname = validate_prompt_hostname(hostname)
+
+ # Make sure this host wasn't already specified:
+ for host in hosts:
+ if host.connect_to == hostname and (host.master or host.node):
+ raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+ 'please specify a separate host' % hostname)
+ return hostname
+
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_lb)
+ install_haproxy = \
+ click.confirm('Should the reference haproxy load balancer be installed on this host?')
+ host_props['preconfigured'] = not install_haproxy
+ host_props['master'] = False
+ host_props['node'] = False
+ host_props['master_lb'] = True
+ master_lb = Host(**host_props)
+ hosts.append(master_lb)
+
+def collect_storage_host(hosts):
+ """
+ Get a valid host for storage from the user and append it to the list of
+ hosts.
+ """
+ message = """
+Setting up High Availability Masters requires a storage host. Please provide a
+host that will be configured for Registry storage.
+"""
+ click.echo(message)
+ host_props = {}
+
+ hostname_or_ip = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_hostname)
+ existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
+ if existing and existing_host.node:
+ existing_host.storage = True
+ else:
+ host_props['connect_to'] = hostname_or_ip
+ host_props['preconfigured'] = False
+ host_props['master'] = False
+ host_props['node'] = False
+ host_props['storage'] = True
+ storage = Host(**host_props)
+ hosts.append(storage)
+
+def is_host_already_node_or_master(hostname, hosts):
+ is_existing = False
+ existing_host = None
+
+ for host in hosts:
+ if host.connect_to == hostname and (host.master or host.node):
+ is_existing = True
+ existing_host = host
+
+ return is_existing, existing_host
+
+def confirm_hosts_facts(oo_cfg, callback_facts):
+ hosts = oo_cfg.hosts
+ click.clear()
+ message = """
+A list of the facts gathered from the provided hosts follows. Because it is
+often the case that the hostname for a system inside the cluster is different
+from the hostname that is resolvable from command line or web clients,
+these settings cannot be validated automatically.
+
+For some cloud providers the installer is able to gather metadata exposed in
+the instance, so reasonable defaults will be provided.
+
+Please confirm that they are correct before moving forward.
+
+"""
+ notes = """
+Format:
+
+connect_to,IP,public IP,hostname,public hostname
+
+Notes:
+ * The installation host is the hostname from the installer's perspective.
+ * The IP of the host should be the internal IP of the instance.
+ * The public IP should be the externally accessible IP associated with the instance
+ * The hostname should resolve to the internal IP from the instances
+ themselves.
+  * The public hostname should resolve to the external IP from hosts outside of
+ the cloud.
+"""
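+    # For illustration, one line of the facts summary described above might
+    # read (hypothetical values):
+    #   master1.example.com,10.0.0.1,24.222.0.1,master1.example.com,master1.public.example.com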
+
+ # For testing purposes we need to click.echo only once, so build up
+ # the message:
+ output = message
+
+ default_facts_lines = []
+ default_facts = {}
+ for h in hosts:
+        if h.preconfigured:
+ continue
+ try:
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+ except KeyError:
+ click.echo("Problem fetching facts from {}".format(h.connect_to))
+ continue
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+ output = "%s\n%s" % (output, ",".join([h.connect_to,
+ h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+
+ output = "%s\n%s" % (output, notes)
+ click.echo(output)
+ facts_confirmed = click.confirm("Do the above facts look correct?")
+ if not facts_confirmed:
+ message = """
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
+""" % oo_cfg.config_path
+ click.echo(message)
+ # Make sure we actually write out the config file.
+ oo_cfg.save_to_disk()
+ sys.exit(0)
+ return default_facts
+
+
+
+def check_hosts_config(oo_cfg, unattended):
+ click.clear()
+ masters = [host for host in oo_cfg.hosts if host.master]
+
+ if len(masters) == 2:
+        click.echo("A minimum of 3 Masters is required for HA deployments.")
+ sys.exit(1)
+
+ if len(masters) > 1:
+ master_lb = [host for host in oo_cfg.hosts if host.master_lb]
+ if len(master_lb) > 1:
+ click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
+ sys.exit(1)
+ elif len(master_lb) == 1:
+ if master_lb[0].master or master_lb[0].node:
+ click.echo('ERROR: The Master load balancer is configured as a master or node. ' \
+ 'Please correct this.')
+ sys.exit(1)
+ else:
+ message = """
+ERROR: No master load balancer specified in config. You must provide the FQDN
+of a load balancer to balance the API (port 8443) on all Master hosts.
+
+https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
+"""
+ click.echo(message)
+ sys.exit(1)
+
+ dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+ if len(dedicated_nodes) == 0:
+ message = """
+WARNING: No dedicated Nodes specified. By default, colocated Masters have
+their Nodes set to unschedulable. If you proceed, all nodes will be labelled
+as schedulable.
+"""
+ if unattended:
+ click.echo(message)
+ else:
+ confirm_continue(message)
+
+ return
+
+def get_variant_and_version(multi_master=False):
+ message = "\nWhich variant would you like to install?\n\n"
+
+ i = 1
+ combos = get_variant_version_combos()
+ for (variant, version) in combos:
+ message = "%s\n(%s) %s %s" % (message, i, variant.description,
+ version.name)
+ i = i + 1
+ message = "%s\n" % message
+
+ click.echo(message)
+ if multi_master:
+        click.echo('NOTE: 3.0 installations are not supported for multiple-master (HA) deployments.')
+ response = click.prompt("Choose a variant from above: ", default=1)
+ product, version = combos[response - 1]
+
+ return product, version
+
+def confirm_continue(message):
+ if message:
+ click.echo(message)
+ click.confirm("Are you ready to continue?", default=False, abort=True)
+ return
+
+def error_if_missing_info(oo_cfg):
+ missing_info = False
+ if not oo_cfg.hosts:
+ missing_info = True
+ click.echo('For unattended installs, hosts must be specified on the '
+ 'command line or in the config file: %s' % oo_cfg.config_path)
+ sys.exit(1)
+
+ if 'ansible_ssh_user' not in oo_cfg.settings:
+ click.echo("Must specify ansible_ssh_user in configuration file.")
+ sys.exit(1)
+
+ # Lookup a variant based on the key we were given:
+ if not oo_cfg.settings['variant']:
+ click.echo("No variant specified in configuration file.")
+ sys.exit(1)
+
+ ver = None
+ if 'variant_version' in oo_cfg.settings:
+ ver = oo_cfg.settings['variant_version']
+ variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
+ if variant is None or version is None:
+ err_variant_name = oo_cfg.settings['variant']
+ if ver:
+ err_variant_name = "%s %s" % (err_variant_name, ver)
+ click.echo("%s is not an installable variant." % err_variant_name)
+ sys.exit(1)
+ oo_cfg.settings['variant_version'] = version.name
+
+ missing_facts = oo_cfg.calc_missing_facts()
+ if len(missing_facts) > 0:
+ missing_info = True
+ click.echo('For unattended installs, facts must be provided for all masters/nodes:')
+ for host in missing_facts:
+ click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
+
+ if missing_info:
+ sys.exit(1)
+
+
+def get_missing_info_from_user(oo_cfg):
+ """ Prompts the user for any information missing from the given configuration. """
+ click.clear()
+
+ message = """
+Welcome to the OpenShift Enterprise 3 installation.
+
+Please confirm that the following prerequisites have been met:
+
+* All systems where OpenShift will be installed are running Red Hat Enterprise
+ Linux 7.
+* All systems are properly subscribed to the required OpenShift Enterprise 3
+ repositories.
+* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
+* All systems have working DNS that resolves not only from the perspective of
+ the installer but also from within the cluster.
+
+When the process completes you will have a default configuration for Masters
+and Nodes. For ongoing environment maintenance it's recommended that the
+official Ansible playbooks be used.
+
+For more information on installation prerequisites please see:
+https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
+"""
+ confirm_continue(message)
+ click.clear()
+
+ if oo_cfg.settings.get('ansible_ssh_user', '') == '':
+ oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
+ click.clear()
+
+ if oo_cfg.settings.get('variant', '') == '':
+ variant, version = get_variant_and_version()
+ oo_cfg.settings['variant'] = variant.name
+ oo_cfg.settings['variant_version'] = version.name
+ click.clear()
+
+ if not oo_cfg.hosts:
+ oo_cfg.hosts = collect_hosts(oo_cfg)
+ click.clear()
+
+ if not oo_cfg.settings.get('master_routingconfig_subdomain', None):
+ oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
+ click.clear()
+
+ return oo_cfg
+
+
+def collect_new_nodes(oo_cfg):
+ click.clear()
+ click.echo('*** New Node Configuration ***')
+ message = """
+Add new nodes here
+ """
+ click.echo(message)
+ return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+
+def get_installed_hosts(hosts, callback_facts):
+ installed_hosts = []
+
+    # Count the native HA load balancer as an installed host
+ try:
+ first_master = next(host for host in hosts if host.master)
+ lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
+ lb_host = \
+ next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip'])
+
+ installed_hosts.append(lb_host)
+ except (KeyError, StopIteration):
+ pass
+
+ for host in hosts:
+ if host.connect_to in callback_facts.keys() and is_installed_host(host, callback_facts):
+ installed_hosts.append(host)
+ return installed_hosts
+
+def is_installed_host(host, callback_facts):
+ version_found = 'common' in callback_facts[host.connect_to].keys() and \
+ callback_facts[host.connect_to]['common'].get('version', '') and \
+ callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+
+ return version_found or host.master_lb or host.preconfigured
+
+# pylint: disable=too-many-branches
+# This pylint error will be corrected shortly in separate PR.
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
+
+ # Copy the list of existing hosts so we can remove any already installed nodes.
+ hosts_to_run_on = list(oo_cfg.hosts)
+
+ # Check if master or nodes already have something installed
+ installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
+ if len(installed_hosts) > 0:
+ click.echo('Installed environment detected.')
+ # This check has to happen before we start removing hosts later in this method
+ if not force:
+ if not unattended:
+ click.echo('By default the installer only adds new nodes ' \
+ 'to an installed environment.')
+ response = click.prompt('Do you want to (1) only add additional nodes or ' \
+ '(2) reinstall the existing hosts ' \
+ 'potentially erasing any custom changes?',
+ type=int)
+ # TODO: this should be reworked with error handling.
+ # Click can certainly do this for us.
+ # This should be refactored as soon as we add a 3rd option.
+ if response == 1:
+ force = False
+ if response == 2:
+ force = True
+
+ # present a message listing already installed hosts and remove hosts if needed
+ for host in installed_hosts:
+ if host.master:
+ click.echo("{} is already an OpenShift Master".format(host))
+ # Masters stay in the list, we need to run against them when adding
+ # new nodes.
+ elif host.node:
+ click.echo("{} is already an OpenShift Node".format(host))
+ # force is only used for reinstalls so we don't want to remove
+ # anything.
+ if not force:
+ hosts_to_run_on.remove(host)
+
+ # Handle the cases where we know about uninstalled systems
+ new_hosts = set(hosts_to_run_on) - set(installed_hosts)
+ if len(new_hosts) > 0:
+ for new_host in new_hosts:
+ click.echo("{} is currently uninstalled".format(new_host))
+
+ # Fall through
+ click.echo('Adding additional nodes...')
+ else:
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional ' \
+ 'nodes specified: aborting. If you want a fresh install, use ' \
+ '`atomic-openshift-installer install --force`')
+ sys.exit(1)
+ else:
+ if not force:
+ new_nodes = collect_new_nodes(oo_cfg)
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
+ if error:
+ click.echo("There was a problem fetching the required information. See " \
+ "{} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
+
+ return hosts_to_run_on, callback_facts
+
+
+@click.group()
+@click.pass_context
+@click.option('--unattended', '-u', is_flag=True, default=False)
+@click.option('--configuration', '-c',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-playbook-directory',
+ '-a',
+ type=click.Path(exists=True,
+ file_okay=False,
+ dir_okay=True,
+ readable=True),
+ # callback=validate_ansible_dir,
+ default=DEFAULT_PLAYBOOK_DIR,
+ envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
+@click.option('--ansible-config',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-log-path',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
+#pylint: disable=too-many-arguments
+#pylint: disable=line-too-long
+# Main CLI entrypoint, not much we can do about too many arguments.
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
+ """
+ atomic-openshift-installer makes the process for installing OSE or AEP
+ easier by interactively gathering the data needed to run on each host.
+ It can also be run in unattended mode if provided with a configuration file.
+
+ Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
+ """
+ ctx.obj = {}
+ ctx.obj['unattended'] = unattended
+ ctx.obj['configuration'] = configuration
+ ctx.obj['ansible_config'] = ansible_config
+ ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
+
+ try:
+ oo_cfg = OOConfig(ctx.obj['configuration'])
+ except OOConfigInvalidHostError as e:
+ click.echo(e)
+ sys.exit(1)
+
+ # If no playbook dir on the CLI, check the config:
+ if not ansible_playbook_directory:
+ ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
+ # If still no playbook dir, check for the default location:
+ if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
+ ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
+ validate_ansible_dir(ansible_playbook_directory)
+ oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
+ oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+ ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
+
+ if ctx.obj['ansible_config']:
+ oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
+ elif 'ansible_config' not in oo_cfg.settings and \
+ os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ # If we're installed by RPM this file should exist and we can use it as our default:
+ oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
+
+ oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
+
+ ctx.obj['oo_cfg'] = oo_cfg
+ openshift_ansible.set_config(oo_cfg)
+
+
+@click.command()
+@click.pass_context
+def uninstall(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if len(oo_cfg.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+ click.echo("OpenShift will be uninstalled from the following hosts:\n")
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Uninstall cancelled.")
+ sys.exit(0)
+
+ openshift_ansible.run_uninstall_playbook(verbose)
+
+
+@click.command()
+@click.pass_context
+def upgrade(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if len(oo_cfg.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+ old_variant = oo_cfg.settings['variant']
+ old_version = oo_cfg.settings['variant_version']
+
+
+ message = """
+ This tool will help you upgrade your existing OpenShift installation.
+"""
+ click.echo(message)
+ click.echo("Version {} found. Do you want to update to the latest version of {} " \
+ "or migrate to the next major release?".format(old_version, old_version))
+    resp = click.prompt("(1) Update to latest {} (2) Migrate to next release".format(old_version))
+
+ if resp == "2":
+ # TODO: Make this a lot more flexible
+ new_version = "3.1"
+        # Update config to reflect the version we're targeting; we'll write
+ # to disk once ansible completes successfully, not before.
+ if oo_cfg.settings['variant'] == 'enterprise':
+ oo_cfg.settings['variant'] = 'openshift-enterprise'
+ version = find_variant(oo_cfg.settings['variant'])[1]
+ oo_cfg.settings['variant_version'] = version.name
+ else:
+ new_version = old_version
+
+    click.echo("OpenShift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
+ old_variant, old_version, oo_cfg.settings['variant'],
+ oo_cfg.settings['variant_version']))
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Upgrade cancelled.")
+ sys.exit(0)
+
+ retcode = openshift_ansible.run_upgrade_playbook(old_version, new_version, verbose)
+ if retcode > 0:
+ click.echo("Errors encountered during upgrade, please check %s." %
+ oo_cfg.settings['ansible_log_path'])
+ else:
+ oo_cfg.save_to_disk()
+ click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+
+
+@click.command()
+@click.option('--force', '-f', is_flag=True, default=False)
+@click.pass_context
+def install(ctx, force):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if ctx.obj['unattended']:
+ error_if_missing_info(oo_cfg)
+ else:
+ oo_cfg = get_missing_info_from_user(oo_cfg)
+
+ check_hosts_config(oo_cfg, ctx.obj['unattended'])
+
+ print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ verbose)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+ oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
+
+ click.echo('Writing config to: %s' % oo_cfg.config_path)
+
+ # We already verified this is not the case for unattended installs, so this can
+ # only trigger for live CLI users:
+ # TODO: if there are *new* nodes and this is a live install, we may need the user
+ # to confirm the settings for new nodes. Look into this once we're distinguishing
+ # between new and pre-existing nodes.
+ if len(oo_cfg.calc_missing_facts()) > 0:
+ confirm_hosts_facts(oo_cfg, callback_facts)
+
+ oo_cfg.save_to_disk()
+
+ click.echo('Ready to run installation process.')
+ message = """
+If changes are needed please edit the config file above and re-run.
+"""
+ if not ctx.obj['unattended']:
+ confirm_continue(message)
+
+ error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
+ hosts_to_run_on, verbose)
+ if error:
+ # The bootstrap script will print out the log location.
+ message = """
+An error was detected. After resolving the problem please relaunch the
+installation process.
+"""
+ click.echo(message)
+ sys.exit(1)
+ else:
+ message = """
+The installation was successful!
+
+If this is your first time installing please take a look at the Administrator
+Guide for advanced options related to routing, storage, authentication and much
+more:
+
+http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+"""
+ click.echo(message)
+ click.pause()
+
+cli.add_command(install)
+cli.add_command(upgrade)
+cli.add_command(uninstall)
+
+if __name__ == '__main__':
+ # This is expected behaviour for context passing with click library:
+ # pylint: disable=unexpected-keyword-arg
+ cli(obj={})
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
new file mode 100644
index 000000000..c9498542f
--- /dev/null
+++ b/utils/src/ooinstall/oo_config.py
@@ -0,0 +1,265 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+
+import os
+import yaml
+from pkg_resources import resource_filename
+
+PERSIST_SETTINGS = [
+ 'ansible_ssh_user',
+ 'ansible_config',
+ 'ansible_log_path',
+ 'master_routingconfig_subdomain',
+ 'variant',
+ 'variant_version',
+ 'version',
+ ]
+DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
+
+
+class OOConfigFileError(Exception):
+ """The provided config file path can't be read/written
+ """
+ pass
+
+
+class OOConfigInvalidHostError(Exception):
+ """ Host in config is missing both ip and hostname. """
+ pass
+
+
+class Host(object):
+ """ A system we will or have installed OpenShift on. """
+ def __init__(self, **kwargs):
+ self.ip = kwargs.get('ip', None)
+ self.hostname = kwargs.get('hostname', None)
+ self.public_ip = kwargs.get('public_ip', None)
+ self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
+ self.preconfigured = kwargs.get('preconfigured', None)
+ self.new_host = kwargs.get('new_host', None)
+
+ # Should this host run as an OpenShift master:
+ self.master = kwargs.get('master', False)
+
+ # Should this host run as an OpenShift node:
+ self.node = kwargs.get('node', False)
+
+ # Should this host run as an HAProxy:
+ self.master_lb = kwargs.get('master_lb', False)
+
+        # Should this host act as the storage (registry/NFS) host:
+ self.storage = kwargs.get('storage', False)
+
+ self.containerized = kwargs.get('containerized', False)
+
+ if self.connect_to is None:
+            raise OOConfigInvalidHostError("You must specify either an IP " \
+ "or hostname as 'connect_to'")
+
+ if self.master is False and self.node is False and \
+ self.master_lb is False and self.storage is False:
+            raise OOConfigInvalidHostError(
+                "Each host must be assigned at least one role: "
+                "master, node, master_lb, or storage.")
+
+ def __str__(self):
+ return self.connect_to
+
+ def __repr__(self):
+ return self.connect_to
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+ for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
+ 'master', 'node', 'master_lb', 'storage', 'containerized',
+ 'connect_to', 'preconfigured', 'new_host']:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ return d
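+        # For illustration (hypothetical host), a colocated master/node would
+        # export something like:
+        #   {'connect_to': 'master1.example.com', 'ip': '10.0.0.1',
+        #    'hostname': 'master1.example.com', 'master': True, 'node': True}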
+
+ def is_etcd_member(self, all_hosts):
+ """ Will this host be a member of a standalone etcd cluster. """
+ if not self.master:
+ return False
+ masters = [host for host in all_hosts if host.master]
+ if len(masters) > 1:
+ return True
+ return False
+
+ def is_dedicated_node(self):
+ """ Will this host be a dedicated node. (not a master) """
+ return self.node and not self.master
+
+ def is_schedulable_node(self, all_hosts):
+ """ Will this host be a node marked as schedulable. """
+ if not self.node:
+ return False
+ if not self.master:
+ return True
+
+ masters = [host for host in all_hosts if host.master]
+ nodes = [host for host in all_hosts if host.node]
+ if len(masters) == len(nodes):
+ return True
+ return False
+
+
+class OOConfig(object):
+ default_dir = os.path.normpath(
+ os.environ.get('XDG_CONFIG_HOME',
+ os.environ['HOME'] + '/.config/') + '/openshift/')
+ default_file = '/installer.cfg.yml'
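+    # With no XDG_CONFIG_HOME set and no config path passed in, this resolves
+    # to ~/.config/openshift/installer.cfg.yml.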
+
+ def __init__(self, config_path):
+ if config_path:
+ self.config_path = os.path.normpath(config_path)
+ else:
+ self.config_path = os.path.normpath(self.default_dir +
+ self.default_file)
+ self.settings = {}
+ self._read_config()
+ self._set_defaults()
+
+ def _read_config(self):
+ self.hosts = []
+ try:
+ if os.path.exists(self.config_path):
+ cfgfile = open(self.config_path, 'r')
+ self.settings = yaml.safe_load(cfgfile.read())
+ cfgfile.close()
+
+ # Use the presence of a Description as an indicator this is
+ # a legacy config file:
+ if 'Description' in self.settings:
+ self._upgrade_legacy_config()
+
+ # Parse the hosts into DTO objects:
+ if 'hosts' in self.settings:
+ for host in self.settings['hosts']:
+ self.hosts.append(Host(**host))
+
+            # Watch out for the variant_version coming in as a float:
+ if 'variant_version' in self.settings:
+ self.settings['variant_version'] = \
+ str(self.settings['variant_version'])
+
+ except IOError, ferr:
+ raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
+ ferr.strerror))
+ except yaml.scanner.ScannerError:
+ raise OOConfigFileError(
+ 'Config file "{}" is not a valid YAML document'.format(self.config_path))
+
+ def _upgrade_legacy_config(self):
+ new_hosts = []
+ remove_settings = ['validated_facts', 'Description', 'Name',
+ 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+
+ if 'validated_facts' in self.settings:
+ for key, value in self.settings['validated_facts'].iteritems():
+ value['connect_to'] = key
+ if 'masters' in self.settings and key in self.settings['masters']:
+ value['master'] = True
+ if 'nodes' in self.settings and key in self.settings['nodes']:
+ value['node'] = True
+ new_hosts.append(value)
+ self.settings['hosts'] = new_hosts
+
+ for s in remove_settings:
+ if s in self.settings:
+ del self.settings[s]
+
+ # A legacy config implies openshift-enterprise 3.0:
+ self.settings['variant'] = 'openshift-enterprise'
+ self.settings['variant_version'] = '3.0'
+
+ def _set_defaults(self):
+
+ if 'ansible_inventory_directory' not in self.settings:
+ self.settings['ansible_inventory_directory'] = \
+ self._default_ansible_inv_dir()
+ if not os.path.exists(self.settings['ansible_inventory_directory']):
+ os.makedirs(self.settings['ansible_inventory_directory'])
+ if 'ansible_plugins_directory' not in self.settings:
+ self.settings['ansible_plugins_directory'] = \
+ resource_filename(__name__, 'ansible_plugins')
+ if 'version' not in self.settings:
+ self.settings['version'] = 'v1'
+
+ if 'ansible_callback_facts_yaml' not in self.settings:
+ self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
+ self.settings['ansible_inventory_directory']
+
+ if 'ansible_ssh_user' not in self.settings:
+ self.settings['ansible_ssh_user'] = ''
+
+ self.settings['ansible_inventory_path'] = \
+ '{}/hosts'.format(self.settings['ansible_inventory_directory'])
+
+        # Clean up any empty settings
+ for setting in self.settings.keys():
+ if not self.settings[setting]:
+ self.settings.pop(setting)
+
+ def _default_ansible_inv_dir(self):
+ return os.path.normpath(
+ os.path.dirname(self.config_path) + "/.ansible")
+
+ def calc_missing_facts(self):
+ """
+ Determine which host facts are not defined in the config.
+
+ Returns a hash of host to a list of the missing facts.
+ """
+ result = {}
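+        # For illustration (hypothetical hosts), the returned mapping looks like:
+        #   {'master1.example.com': ['ip', 'public_ip'], 'node1.example.com': ['hostname']}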
+
+ for host in self.hosts:
+ missing_facts = []
+ if host.preconfigured:
+ required_facts = PRECONFIGURED_REQUIRED_FACTS
+ else:
+ required_facts = DEFAULT_REQUIRED_FACTS
+
+ for required_fact in required_facts:
+ if not getattr(host, required_fact):
+ missing_facts.append(required_fact)
+ if len(missing_facts) > 0:
+ result[host.connect_to] = missing_facts
+ return result
+
+ def save_to_disk(self):
+ out_file = open(self.config_path, 'w')
+ out_file.write(self.yaml())
+ out_file.close()
+
+ def persist_settings(self):
+ p_settings = {}
+ for setting in PERSIST_SETTINGS:
+ if setting in self.settings and self.settings[setting]:
+ p_settings[setting] = self.settings[setting]
+ p_settings['hosts'] = []
+ for host in self.hosts:
+ p_settings['hosts'].append(host.to_dict())
+
+ if self.settings['ansible_inventory_directory'] != \
+ self._default_ansible_inv_dir():
+ p_settings['ansible_inventory_directory'] = \
+ self.settings['ansible_inventory_directory']
+
+ return p_settings
+
+ def yaml(self):
+ return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
+
+ def __str__(self):
+ return self.yaml()
+
+ def get_host(self, name):
+ for host in self.hosts:
+ if host.connect_to == name:
+ return host
+ return None
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
new file mode 100644
index 000000000..04cccf89d
--- /dev/null
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -0,0 +1,273 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+
+import socket
+import subprocess
+import sys
+import os
+import yaml
+from ooinstall.variants import find_variant
+
+CFG = None
+
+def set_config(cfg):
+ global CFG
+ CFG = cfg
+
+def generate_inventory(hosts):
+ global CFG
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ new_nodes = [host for host in hosts if host.node and host.new_host]
+ proxy = determine_proxy_configuration(hosts)
+ storage = determine_storage_configuration(hosts)
+ multiple_masters = len(masters) > 1
+ scaleup = len(new_nodes) > 0
+
+ base_inventory_path = CFG.settings['ansible_inventory_path']
+ base_inventory = open(base_inventory_path, 'w')
+
+ write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup)
+
+ write_inventory_vars(base_inventory, multiple_masters, proxy)
+
+ # Find the correct deployment type for ansible:
+ ver = find_variant(CFG.settings['variant'],
+ version=CFG.settings.get('variant_version', None))[1]
+ base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
+
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_additional_registries={}\n'
+ .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_insecure_registries={}\n'
+ .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
+ "'name': 'ose-devel', "
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
+
+ base_inventory.write('\n[masters]\n')
+ for master in masters:
+ write_host(master, base_inventory)
+
+ if len(masters) > 1:
+ base_inventory.write('\n[etcd]\n')
+ for master in masters:
+ write_host(master, base_inventory)
+
+ base_inventory.write('\n[nodes]\n')
+
+ for node in nodes:
+ # Let the fact defaults decide if we're not a master:
+ schedulable = None
+
+        # If the node is also a master, we must explicitly set schedulability:
+ if node.master:
+ schedulable = node.is_schedulable_node(hosts)
+ write_host(node, base_inventory, schedulable)
+
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('\n[lb]\n')
+ write_host(proxy, base_inventory)
+
+
+ if scaleup:
+ base_inventory.write('\n[new_nodes]\n')
+ for node in new_nodes:
+ write_host(node, base_inventory)
+
+ if storage:
+ base_inventory.write('\n[nfs]\n')
+ write_host(storage, base_inventory)
+
+ base_inventory.close()
+ return base_inventory_path
+
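+# For illustration only: for a hypothetical single-master cluster with one
+# dedicated node, generate_inventory produces an INI-style inventory roughly like:
+#
+#   [OSEv3:children]
+#   masters
+#   nodes
+#
+#   [OSEv3:vars]
+#   ansible_ssh_user=root
+#   deployment_type=openshift-enterprise
+#
+#   [masters]
+#   master1.example.com openshift_ip=10.0.0.1 openshift_hostname=master1.example.com
+#
+#   [nodes]
+#   master1.example.com openshift_ip=10.0.0.1 openshift_hostname=master1.example.com openshift_schedulable=False
+#   node1.example.com openshift_ip=10.0.0.2 openshift_hostname=node1.example.com
+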
+def determine_proxy_configuration(hosts):
+ proxy = next((host for host in hosts if host.master_lb), None)
+ if proxy:
+ if proxy.hostname == None:
+ proxy.hostname = proxy.connect_to
+ proxy.public_hostname = proxy.connect_to
+
+ return proxy
+
+def determine_storage_configuration(hosts):
+ storage = next((host for host in hosts if host.storage), None)
+
+ return storage
+
+def write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup):
+ global CFG
+
+ base_inventory.write('\n[OSEv3:children]\n')
+ base_inventory.write('masters\n')
+ base_inventory.write('nodes\n')
+ if scaleup:
+ base_inventory.write('new_nodes\n')
+ if multiple_masters:
+ base_inventory.write('etcd\n')
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('lb\n')
+ if storage:
+ base_inventory.write('nfs\n')
+
+def write_inventory_vars(base_inventory, multiple_masters, proxy):
+ global CFG
+ base_inventory.write('\n[OSEv3:vars]\n')
+ base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+ if CFG.settings['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_become=true\n')
+ if multiple_masters and proxy is not None:
+ base_inventory.write('openshift_master_cluster_method=native\n')
+ base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
+ base_inventory.write(
+ "openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
+ if CFG.settings.get('master_routingconfig_subdomain', False):
+ base_inventory.write(
+ "openshift_master_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
+
+
+
+def write_host(host, inventory, schedulable=None):
+ global CFG
+
+ facts = ''
+ if host.ip:
+ facts += ' openshift_ip={}'.format(host.ip)
+ if host.public_ip:
+ facts += ' openshift_public_ip={}'.format(host.public_ip)
+ if host.hostname:
+ facts += ' openshift_hostname={}'.format(host.hostname)
+ if host.public_hostname:
+ facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+ if host.containerized:
+ facts += ' containerized={}'.format(host.containerized)
+    # TODO: For now write_host handles both masters and nodes.
+ # Technically only nodes will ever need this.
+
+ # Distinguish between three states, no schedulability specified (use default),
+ # explicitly set to True, or explicitly set to False:
+ if schedulable is None:
+ pass
+ elif schedulable:
+ facts += ' openshift_schedulable=True'
+ elif not schedulable:
+ facts += ' openshift_schedulable=False'
+
+ installer_host = socket.gethostname()
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=true'
+
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
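+    # For illustration (hypothetical values), a resulting inventory line might be:
+    #   node1.example.com  openshift_ip=10.0.0.2 openshift_hostname=node1.example.com openshift_schedulable=False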
+
+
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
+ """
+ Retrieves system facts from the remote systems.
+ """
+ FNULL = open(os.devnull, 'w')
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
+ if not status == 0:
+ return [], 1
+
+ with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+ try:
+ callback_facts = yaml.safe_load(callback_facts_file)
+ except yaml.YAMLError, exc:
+ print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
+ print "Try deleting and rerunning the atomic-openshift-installer"
+ sys.exit(1)
+
+ return callback_facts, 0
+
+
+def default_facts(hosts, verbose=False):
+ global CFG
+ inventory_file = generate_inventory(hosts)
+ os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
+
+ facts_env = os.environ.copy()
+ facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
+ facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
+ if 'ansible_log_path' in CFG.settings:
+ facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
+
+
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
+ global CFG
+ inventory_file = generate_inventory(hosts_to_run_on)
+ if len(hosts_to_run_on) != len(hosts):
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/openshift-node/scaleup.yml')
+ else:
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/openshift-cluster/config.yml')
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
+
+
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ return subprocess.call(args, env=env_vars)
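+# With verbose=True this is roughly equivalent to running:
+#   ansible-playbook -v --inventory-file=<inventory> <playbook>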
+
+
+def run_uninstall_playbook(verbose=False):
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/adhoc/uninstall.yml')
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
+
+def run_upgrade_playbook(old_version, new_version, verbose=False):
+ # TODO: do not hardcode the upgrade playbook, add ability to select the
+ # right playbook depending on the type of upgrade.
+ old_version = old_version.replace('.', '_')
+    new_version = new_version.replace('.', '_')
+ if old_version == new_version:
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/byo/openshift-cluster/upgrades/v{}_minor/upgrade.yml'.format(new_version))
+ else:
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/byo/openshift-cluster/upgrades/v{}_to_v{}/upgrade.yml'.format(old_version,
+ new_version))
+ # TODO: Upgrade inventory for upgrade?
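+    # For example (hypothetical versions), a 3.0 -> 3.1 migration resolves to
+    # playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml, while a
+    # same-version update would use .../upgrades/v3_1_minor/upgrade.yml.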
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
new file mode 100644
index 000000000..571025543
--- /dev/null
+++ b/utils/src/ooinstall/variants.py
@@ -0,0 +1,77 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-few-public-methods
+
+"""
+Defines the variants and versions the installer supports, and the metadata
+required to run Ansible correctly.
+
+This module needs to be updated for each major release to allow the new version
+to be specified by the user, and to point the generic variants to the latest
+version.
+"""
+
+
+class Version(object):
+ def __init__(self, name, ansible_key):
+        self.name = name  # e.g. 3.0, 3.1
+
+ self.ansible_key = ansible_key
+
+
+class Variant(object):
+ def __init__(self, name, description, versions):
+ # Supported variant name:
+ self.name = name
+
+ # Friendly name for the variant:
+ self.description = description
+
+ self.versions = versions
+
+ def latest_version(self):
+ return self.versions[0]
+
+
+# WARNING: Keep the versions ordered, most recent first (latest_version() returns versions[0]):
+OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
+ [
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'enterprise')
+ ]
+)
+
+AEP = Variant('atomic-enterprise', 'Atomic Enterprise Platform',
+ [
+ Version('3.1', 'atomic-enterprise')
+ ]
+)
+
+# Ordered list of variants we can install, first is the default.
+SUPPORTED_VARIANTS = (OSE, AEP)
+
+
+def find_variant(name, version=None):
+ """
+ Locate the variant object for the variant given in config file, and
+ the correct version to use for it.
+ Return (None, None) if we can't find a match.
+ """
+ prod = None
+ for prod in SUPPORTED_VARIANTS:
+ if prod.name == name:
+ if version is None:
+ return (prod, prod.latest_version())
+ for v in prod.versions:
+ if v.name == version:
+ return (prod, v)
+
+ return (None, None)
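+# Illustrative usage, based on the variants defined above:
+#   find_variant('openshift-enterprise')         -> (OSE, its 3.1 Version)
+#   find_variant('openshift-enterprise', '3.0')  -> (OSE, its 3.0 Version)
+#   find_variant('bogus')                        -> (None, None)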
+
+def get_variant_version_combos():
+ combos = []
+ for variant in SUPPORTED_VARIANTS:
+ for ver in variant.versions:
+ combos.append((variant, ver))
+ return combos
+