Diffstat (limited to 'utils')
 utils/src/ooinstall/cli_installer.py      | 56
 utils/src/ooinstall/oo_config.py          |  4
 utils/src/ooinstall/openshift_ansible.py  | 30
 utils/test/cli_installer_tests.py         | 61
 utils/test/fixture.py                     | 20
 5 files changed, 120 insertions(+), 51 deletions(-)
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index dc88cb1ad..3046d4d58 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -33,9 +33,7 @@ def is_valid_hostname(hostname):
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
- raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
- 'Please double-check this value i' \
- 'and re-enter it.'.format(hostname))
+ raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
@@ -47,6 +45,15 @@ passwordless sudo access.
click.echo(message)
return click.prompt('User for ssh access', default='root')
+def get_master_routingconfig_subdomain():
+ click.clear()
+ message = """
+You might want to override the default subdomain used for exposed routes. If you don't know what
+this is, use the default value.
+"""
+ click.echo(message)
+ return click.prompt('New default subdomain (ENTER for none)', default='')
+
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
@@ -72,7 +79,7 @@ def delete_hosts(hosts):
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
-def collect_hosts(version=None, masters_set=False, print_summary=True):
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
@@ -125,26 +132,29 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
host_props['master'] = True
num_masters += 1
- if version == '3.0':
+ if oo_cfg.settings['variant_version'] == '3.0':
masters_set = True
host_props['node'] = True
- #TODO: Reenable this option once container installs are out of tech preview
- #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
- # type=click.Choice(['rpm', 'container']),
- # default='rpm')
- #if rpm_or_container == 'container':
- # host_props['containerized'] = True
- #else:
- # host_props['containerized'] = False
host_props['containerized'] = False
+ if oo_cfg.settings['variant_version'] != '3.0':
+ rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+
+ if existing_env:
+ host_props['new_host'] = True
+ else:
+ host_props['new_host'] = False
host = Host(**host_props)
hosts.append(host)
if print_summary:
- print_installation_summary(hosts)
+ print_installation_summary(hosts, oo_cfg.settings['variant_version'])
# If we have one master, this is enough for an all-in-one deployment,
# thus we can start asking if you wish to proceed. Otherwise we assume
@@ -158,7 +168,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
return hosts
-def print_installation_summary(hosts):
+def print_installation_summary(hosts, version=None):
"""
Displays a summary of all hosts configured thus far, and what role each
will play.
@@ -179,7 +189,7 @@ def print_installation_summary(hosts):
click.echo('Total OpenShift Masters: %s' % len(masters))
click.echo('Total OpenShift Nodes: %s' % len(nodes))
- if len(masters) == 1:
+ if len(masters) == 1 and version != '3.0':
ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
click.echo(ha_hint_message)
@@ -494,20 +504,24 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
click.clear()
if not oo_cfg.hosts:
- oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version'])
+ oo_cfg.hosts = collect_hosts(oo_cfg)
+ click.clear()
+
+ if not oo_cfg.settings.get('master_routingconfig_subdomain', None):
+ oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
click.clear()
return oo_cfg
-def collect_new_nodes():
+def collect_new_nodes(oo_cfg):
click.clear()
click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
click.echo(message)
- return collect_hosts(masters_set=True, print_summary=False)
+ return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
@@ -577,7 +591,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
sys.exit(1)
else:
if not force:
- new_nodes = collect_new_nodes()
+ new_nodes = collect_new_nodes(oo_cfg)
hosts_to_run_on.extend(new_nodes)
oo_cfg.hosts.extend(new_nodes)
@@ -752,7 +766,7 @@ def install(ctx, force):
check_hosts_config(oo_cfg, ctx.obj['unattended'])
- print_installation_summary(oo_cfg.hosts)
+ print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
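A standalone sketch of the new subdomain prompt, for trying the interaction outside the installer (hypothetical wrapper name; assumes the click package is installed; the real flow stores the answer in oo_cfg.settings['master_routingconfig_subdomain'] as shown above):

    # Illustrative only; mirrors get_master_routingconfig_subdomain() added above.
    import click

    def prompt_for_subdomain():
        click.clear()
        click.echo("""
You might want to override the default subdomain used for exposed routes. If you don't know what
this is, use the default value.
""")
        # Pressing ENTER returns '' -- a falsy value, so osm_default_subdomain
        # is later left out of the generated inventory.
        return click.prompt('New default subdomain (ENTER for none)', default='')

    if __name__ == '__main__':
        print('master_routingconfig_subdomain = {!r}'.format(prompt_for_subdomain()))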
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 031b82bc1..33ab27567 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -38,6 +38,7 @@ class Host(object):
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
+ self.new_host = kwargs.get('new_host', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -68,7 +69,8 @@ class Host(object):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
+ 'master', 'node', 'master_lb', 'containerized',
+ 'connect_to', 'preconfigured', 'new_host']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
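The new_host flag is only persisted when it is actually set: to_dict() skips properties that are None or False, so existing entries in installer.cfg.yml stay unchanged. A minimal sketch of that rule with a simplified stand-in class (illustrative only):

    class HostSketch(object):
        """Simplified stand-in for ooinstall.oo_config.Host."""
        def __init__(self, **kwargs):
            self.connect_to = kwargs.get('connect_to', None)
            self.node = kwargs.get('node', False)
            self.new_host = kwargs.get('new_host', None)

        def to_dict(self):
            d = {}
            for prop in ['connect_to', 'node', 'new_host']:
                # Same rule as above: only defined (not None or False) values are exported.
                if getattr(self, prop):
                    d[prop] = getattr(self, prop)
            return d

    # new_host shows up for the freshly added node only:
    print(HostSketch(connect_to='10.0.0.4', node=True, new_host=True).to_dict())
    print(HostSketch(connect_to='10.0.0.1', node=True).to_dict())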
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 17196a813..cbb6f33e1 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -19,13 +19,15 @@ def generate_inventory(hosts):
global CFG
masters = [host for host in hosts if host.master]
nodes = [host for host in hosts if host.node]
+ new_nodes = [host for host in hosts if host.node and host.new_host]
proxy = determine_proxy_configuration(hosts)
multiple_masters = len(masters) > 1
+ scaleup = len(new_nodes) > 0
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
- write_inventory_children(base_inventory, multiple_masters, proxy)
+ write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)
write_inventory_vars(base_inventory, multiple_masters, proxy)
@@ -71,6 +73,11 @@ def generate_inventory(hosts):
base_inventory.write('\n[lb]\n')
write_host(proxy, base_inventory)
+ if scaleup:
+ base_inventory.write('\n[new_nodes]\n')
+ for node in new_nodes:
+ write_host(node, base_inventory)
+
base_inventory.close()
return base_inventory_path
@@ -84,12 +91,14 @@ def determine_proxy_configuration(hosts):
return None
-def write_inventory_children(base_inventory, multiple_masters, proxy):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
global CFG
base_inventory.write('\n[OSEv3:children]\n')
base_inventory.write('masters\n')
base_inventory.write('nodes\n')
+ if scaleup:
+ base_inventory.write('new_nodes\n')
if multiple_masters:
base_inventory.write('etcd\n')
if not getattr(proxy, 'preconfigured', True):
@@ -105,6 +114,9 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
+ if CFG.settings['master_routingconfig_subdomain']:
+ base_inventory.write("osm_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
+
def write_host(host, inventory, schedulable=None):
@@ -119,6 +131,8 @@ def write_host(host, inventory, schedulable=None):
facts += ' openshift_hostname={}'.format(host.hostname)
if host.public_hostname:
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+ if host.containerized:
+ facts += ' containerized={}'.format(host.containerized)
# TODO: For not write_host is handles both master and nodes.
# Technically only nodes will ever need this.
@@ -157,9 +171,15 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
status = subprocess.call(args, env=env_vars, stdout=FNULL)
if not status == 0:
return [], 1
- callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
- callback_facts = yaml.load(callback_facts_file)
- callback_facts_file.close()
+
+ with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+ try:
+ callback_facts = yaml.safe_load(callback_facts_file)
+ except yaml.YAMLError, exc:
+ print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
+ print "Try deleting and rerunning the atomic-openshift-installer"
+ sys.exit(1)
+
return callback_facts, 0
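Net effect on the generated inventory for a scale-up run, sketched below (group and variable names come from the code above; the host value and subdomain are illustrative, and surrounding sections are elided):

    [OSEv3:children]
    masters
    nodes
    # declared only when at least one host was collected with new_host=True
    new_nodes

    # written only when a subdomain was entered at the new prompt
    osm_default_subdomain=example.com

    [new_nodes]
    # one write_host() line per newly added node, carrying per-host facts
    # such as openshift_hostname and, for container installs, containerized=True
    10.0.0.4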
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 1da49c807..baab5d56f 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -102,6 +102,7 @@ hosts:
QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
+master_routingconfig_subdomain: example.com
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
@@ -228,6 +229,7 @@ hosts:
QUICKHA_CONFIG_PRECONFIGURED_LB = """
variant: %s
ansible_ssh_user: root
+master_routingconfig_subdomain: example.com
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
@@ -681,9 +683,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
- ('10.0.0.3', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -722,10 +724,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
],
- add_nodes=[('10.0.0.3', False)],
+ add_nodes=[('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -773,9 +775,9 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
+ ('10.0.0.1', True, False),
],
- add_nodes=[('10.0.0.2', False)],
+ add_nodes=[('10.0.0.2', False, False)],
ssh_user='root',
variant_num=1,
schedulable_masters_ok=True,
@@ -796,10 +798,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True),
- ('10.0.0.4', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -837,9 +839,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -872,10 +874,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', False),
- ('10.0.0.4', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -893,7 +895,7 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True)],
+ ('10.0.0.1', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -913,6 +915,25 @@ class AttendedCliTests(OOCliFixture):
self.assertEquals('True',
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ #interactive 3.0 install confirm no HA hints
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_hint(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False)],
+ ssh_user='root',
+ variant_num=2,
+ confirm_facts='y')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+ self.assertTrue("NOTE: Add a total of 3 or more Masters to perform an HA installation."
+ not in result.output)
+
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 90bd9e1ef..1b1c2e5c2 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -11,6 +11,7 @@ from click.testing import CliRunner
SAMPLE_CONFIG = """
variant: %s
ansible_ssh_user: root
+master_routingconfig_subdomain: example.com
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
@@ -138,7 +139,7 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-#pylint: disable=too-many-arguments,too-many-branches
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=None):
@@ -163,13 +164,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
num_masters = 0
if hosts:
i = 0
- for (host, is_master) in hosts:
+ for (host, is_master, is_containerized) in hosts:
inputs.append(host)
if is_master:
inputs.append('y')
num_masters += 1
else:
inputs.append('n')
+
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+
#inputs.append('rpm')
# We should not be prompted to add more hosts if we're currently at
# 2 masters, this is an invalid HA configuration, so this question
@@ -190,14 +197,20 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append(master_lb[0])
inputs.append('y' if master_lb[1] else 'n')
+ inputs.append('example.com')
+
# TODO: support option 2, fresh install
if add_nodes:
if schedulable_masters_ok:
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
- for (host, is_master) in add_nodes:
+ for (host, is_master, is_containerized) in add_nodes:
inputs.append(host)
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
#inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
@@ -218,4 +231,3 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
])
return '\n'.join(inputs)
-
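With the three-element host tuples, build_input() now feeds one extra answer per host for the rpm/container question. A sketch of what a single tuple expands to (the master-question wording is paraphrased; the rpm/container prompt is quoted from cli_installer.py above):

    # ('10.0.0.3', False, True) becomes the interactive answers:
    answers = [
        '10.0.0.3',    # hostname / connect_to prompt
        'n',           # not an OpenShift master
        'container',   # 'Will this host be RPM or Container based (rpm/container)?'
    ]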