Diffstat (limited to 'utils')
-rw-r--r--  utils/docs/config.md                      |   3
-rw-r--r--  utils/src/ooinstall/cli_installer.py      | 100
-rw-r--r--  utils/src/ooinstall/oo_config.py          |  31
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py  |  53
-rw-r--r--  utils/test/cli_installer_tests.py         | 222
-rw-r--r--  utils/test/oo_config_tests.py             |  22
6 files changed, 305 insertions, 126 deletions
diff --git a/utils/docs/config.md b/utils/docs/config.md
index ee4b157c9..2729f8d37 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -19,16 +19,19 @@ hosts:
master: true
node: true
containerized: true
+ connect_to: 24.222.0.1
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
+ connect_to: 10.0.0.2
- ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ connect_to: 10.0.0.3
```
## Primary Settings
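The config.md change above documents the new `connect_to` key: each host entry now records the address the installer actually uses to reach the host, independent of the `ip`/`hostname` facts gathered later. As a rough illustration (not part of the patch), the sketch below parses a config shaped like the documented example and indexes hosts by `connect_to`; it assumes PyYAML is available and uses field names from docs/config.md.

```
# Sketch only (not part of the patch): index installer hosts by the new
# 'connect_to' key, assuming a config shaped like the docs/config.md example
# and that PyYAML is installed.
import yaml

SAMPLE_CONFIG = """
hosts:
  - connect_to: 24.222.0.1
    ip: 10.0.0.1
    hostname: master-private.example.com
    public_hostname: master.example.com
    master: true
    node: true
  - connect_to: 10.0.0.2
    ip: 10.0.0.2
    hostname: node1-private.example.com
    node: true
"""

def hosts_by_connect_to(config_text):
    """Return host entries keyed by the address the installer connects to."""
    config = yaml.safe_load(config_text)
    return dict((h['connect_to'], h) for h in config.get('hosts', []))

for addr, host in sorted(hosts_by_connect_to(SAMPLE_CONFIG).items()):
    print('{} -> {}'.format(addr, host.get('hostname')))
```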
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 8bee99f90..3c3f45c3b 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -11,7 +11,7 @@ from ooinstall import OOConfig
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
-DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg'
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
@@ -101,29 +101,26 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
- ip_regex = re.compile(r'^\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}$')
-
while more_hosts:
host_props = {}
hostname_or_ip = click.prompt('Enter hostname or IP address:',
default='',
value_proc=validate_prompt_hostname)
- if ip_regex.match(hostname_or_ip):
- host_props['ip'] = hostname_or_ip
- else:
- host_props['hostname'] = hostname_or_ip
+ host_props['connect_to'] = hostname_or_ip
host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
host_props['node'] = True
- rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
- type=click.Choice(['rpm', 'container']),
- default='rpm')
- if rpm_or_container == 'container':
- host_props['containerized'] = True
- else:
- host_props['containerized'] = False
+ #TODO: Reenable this option once container installs are out of tech preview
+ #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ # type=click.Choice(['rpm', 'container']),
+ # default='rpm')
+ #if rpm_or_container == 'container':
+ # host_props['containerized'] = True
+ #else:
+ # host_props['containerized'] = False
+ host_props['containerized'] = False
host = Host(**host_props)
@@ -150,7 +147,7 @@ Plese confirm that they are correct before moving forward.
notes = """
Format:
-IP,public IP,hostname,public hostname
+connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
@@ -168,20 +165,20 @@ Notes:
default_facts_lines = []
default_facts = {}
- validated_facts = {}
for h in hosts:
- default_facts[h] = {}
- h.ip = callback_facts[str(h)]["common"]["ip"]
- h.public_ip = callback_facts[str(h)]["common"]["public_ip"]
- h.hostname = callback_facts[str(h)]["common"]["hostname"]
- h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"]
-
- validated_facts[h] = {}
- default_facts_lines.append(",".join([h.ip,
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
- output = "%s\n%s" % (output, ",".join([h.ip,
+ output = "%s\n%s" % (output, ",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
@@ -191,7 +188,7 @@ Notes:
facts_confirmed = click.confirm("Do the above facts look correct?")
if not facts_confirmed:
message = """
-Edit %s with the desired values and rerun atomic-openshift-installer with --unattended .
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
click.echo(message)
# Make sure we actually write out the config file.
@@ -316,14 +313,16 @@ Add new nodes here
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
- if(host.name in callback_facts.keys()
- and 'common' in callback_facts[host.name].keys()
- and callback_facts[host.name]['common'].get('version', '')
- and callback_facts[host.name]['common'].get('version', '') != 'None'):
+ if(host.connect_to in callback_facts.keys()
+ and 'common' in callback_facts[host.connect_to].keys()
+ and callback_facts[host.connect_to]['common'].get('version', '')
+ and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
-def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+# pylint: disable=too-many-branches
+# This pylint error will be corrected shortly in separate PR.
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
@@ -337,7 +336,9 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
if not unattended:
click.echo('By default the installer only adds new nodes to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
- '(2) perform a clean install?', type=int)
+ '(2) reinstall the existing hosts ' \
+ 'potentially erasing any custom changes?',
+ type=int)
# TODO: this should be reworked with error handling.
# Click can certainly do this for us.
# This should be refactored as soon as we add a 3rd option.
@@ -383,7 +384,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
openshift_ansible.set_config(oo_cfg)
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"See {} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -424,18 +425,23 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
writable=True,
readable=True),
default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
#pylint: disable=too-many-arguments
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
- The main click CLI module. Responsible for handling most common CLI options,
- assigning any defaults and adding to the context for the sub-commands.
+ atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host.
+ It can also be run in unattended mode if provided with a configuration file.
+
+ Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
oo_cfg = OOConfig(ctx.obj['configuration'])
@@ -466,6 +472,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -475,22 +482,23 @@ def uninstall(ctx):
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
sys.exit(0)
- openshift_ansible.run_uninstall_playbook()
+ openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
- click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
# Update config to reflect the version we're targetting, we'll write
@@ -505,7 +513,7 @@ def upgrade(ctx):
old_variant, old_version, oo_cfg.settings['variant'],
oo_cfg.settings['variant_version']))
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
@@ -514,11 +522,12 @@ def upgrade(ctx):
click.echo("Upgrade cancelled.")
sys.exit(0)
- retcode = openshift_ansible.run_upgrade_playbook()
+ retcode = openshift_ansible.run_upgrade_playbook(verbose)
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
oo_cfg.settings['ansible_log_path'])
else:
+ oo_cfg.save_to_disk()
click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@@ -527,6 +536,7 @@ def upgrade(ctx):
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
@@ -534,13 +544,15 @@ def install(ctx, force):
oo_cfg = get_missing_info_from_user(oo_cfg)
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
- hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force)
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+ oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
@@ -562,7 +574,7 @@ If changes are needed to the values recorded by the installer please update {}.
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
- hosts_to_run_on)
+ hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
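Note in the cli_installer.py hunks how callback facts and the installed-host check are now keyed by `host.connect_to` rather than the old derived `host.name`. Below is a simplified, self-contained sketch of that filtering logic; the `Host` stub is illustrative, not the real ooinstall.oo_config.Host class.

```
# Illustrative sketch of the installed-host filtering rekeyed on connect_to.
# The Host stub below is a stand-in for ooinstall.oo_config.Host.
class Host(object):
    def __init__(self, connect_to, master=False, node=True):
        self.connect_to = connect_to
        self.master = master
        self.node = node

def get_installed_hosts(hosts, callback_facts):
    """Return hosts whose gathered facts already report an OpenShift version."""
    installed = []
    for host in hosts:
        common = callback_facts.get(host.connect_to, {}).get('common', {})
        version = common.get('version', '')
        if version and version != 'None':
            installed.append(host)
    return installed

callback_facts = {
    '10.0.0.1': {'common': {'version': '3.0.0'}},
    '10.0.0.2': {'common': {}},
}
hosts = [Host('10.0.0.1', master=True), Host('10.0.0.2')]
print([h.connect_to for h in get_installed_hosts(hosts, callback_facts)])
# -> ['10.0.0.1']
```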
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 4281947f1..9c97e6e93 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -35,6 +35,7 @@ class Host(object):
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -43,30 +44,25 @@ class Host(object):
self.node = kwargs.get('node', False)
self.containerized = kwargs.get('containerized', False)
- if self.ip is None and self.hostname is None:
- raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'")
+ if self.connect_to is None:
+ raise OOConfigInvalidHostError("You must specify either an 'ip' " \
+ "or 'hostname' to connect to.")
if self.master is False and self.node is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
- # Hosts can be specified with an ip, hostname, or both. However we need
- # something authoritative we can connect to and refer to the host by.
- # Preference given to the IP if specified as this is more specific.
- # We know one must be set by this point.
- self.name = self.ip if self.ip is not None else self.hostname
-
def __str__(self):
- return self.name
+ return self.connect_to
def __repr__(self):
- return self.name
+ return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized']:
+ 'master', 'node', 'containerized', 'connect_to']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
@@ -120,8 +116,12 @@ class OOConfig(object):
def _upgrade_legacy_config(self):
new_hosts = []
+ remove_settings = ['validated_facts', 'Description', 'Name',
+ 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+
if 'validated_facts' in self.settings:
for key, value in self.settings['validated_facts'].iteritems():
+ value['connect_to'] = key
if 'masters' in self.settings and key in self.settings['masters']:
value['master'] = True
if 'nodes' in self.settings and key in self.settings['nodes']:
@@ -129,10 +129,9 @@ class OOConfig(object):
new_hosts.append(value)
self.settings['hosts'] = new_hosts
- remove_settings = ['validated_facts', 'Description', 'Name',
- 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
for s in remove_settings:
- del self.settings[s]
+ if s in self.settings:
+ del self.settings[s]
# A legacy config implies openshift-enterprise 3.0:
self.settings['variant'] = 'openshift-enterprise'
@@ -182,7 +181,7 @@ class OOConfig(object):
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
- result[host.name] = missing_facts
+ result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
@@ -214,6 +213,6 @@ class OOConfig(object):
def get_host(self, name):
for host in self.hosts:
- if host.name == name:
+ if host.connect_to == name:
return host
return None
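The oo_config.py hunks fold the old `validated_facts` mapping into per-host `connect_to` fields and make the legacy-setting cleanup tolerant of missing keys. A hedged, standalone sketch of that upgrade path follows; the dictionary shapes are assumed from the diff, and the real method operates on `self.settings` rather than a passed-in dict.

```
# Standalone sketch of the legacy-config upgrade shown above. A legacy config
# keeps facts in 'validated_facts' keyed by address; each key becomes the new
# host's connect_to, and legacy settings are removed only if present.
def upgrade_legacy_settings(settings):
    remove_settings = ['validated_facts', 'Description', 'Name',
                       'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
    new_hosts = []
    if 'validated_facts' in settings:
        for key, value in settings['validated_facts'].items():
            value['connect_to'] = key
            if key in settings.get('masters', []):
                value['master'] = True
            if key in settings.get('nodes', []):
                value['node'] = True
            new_hosts.append(value)
        settings['hosts'] = new_hosts
    for s in remove_settings:
        if s in settings:
            del settings[s]
    # A legacy config implies openshift-enterprise 3.0:
    settings['variant'] = 'openshift-enterprise'
    return settings

legacy = {
    'validated_facts': {'10.0.0.1': {'hostname': 'master-private.example.com'}},
    'masters': ['10.0.0.1'],
    'nodes': ['10.0.0.1'],
}
print(upgrade_legacy_settings(legacy)['hosts'])
```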
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 0648df0fa..fdd0c1168 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -79,28 +79,29 @@ def write_host(host, inventory, scheduleable=True):
if not scheduleable:
facts += ' openshift_scheduleable=False'
installer_host = socket.gethostname()
- if host.hostname == installer_host or host.public_hostname == installer_host:
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
- no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo openshift'])
if no_pwd_sudo == 1:
print 'The atomic-openshift-installer requires sudo access without a password.'
sys.exit(1)
facts += ' ansible_become=true'
- inventory.write('{} {}\n'.format(host, facts))
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
-def load_system_facts(inventory_file, os_facts_path, env_vars):
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
FNULL = open(os.devnull, 'w')
- status = subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory_file),
- os_facts_path],
- env=env_vars,
- stdout=FNULL)
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
if not status == 0:
return [], 1
callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
@@ -109,7 +110,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars):
return callback_facts, 0
-def default_facts(hosts):
+def default_facts(hosts, verbose=False):
global CFG
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -121,12 +122,12 @@ def default_facts(hosts):
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return load_system_facts(inventory_file, os_facts_path, facts_env)
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
-def run_main_playbook(hosts, hosts_to_run_on):
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
global CFG
- inventory_file = generate_inventory(hosts)
+ inventory_file = generate_inventory(hosts_to_run_on)
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/common/openshift-cluster/scaleup.yml')
@@ -138,17 +139,19 @@ def run_main_playbook(hosts, hosts_to_run_on):
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(main_playbook_path, inventory_file, facts_env)
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
-def run_ansible(playbook, inventory, env_vars):
- return subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory),
- playbook],
- env=env_vars)
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ return subprocess.call(args, env=env_vars)
-def run_uninstall_playbook():
+def run_uninstall_playbook(verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(CFG.hosts)
@@ -157,12 +160,14 @@ def run_uninstall_playbook():
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(playbook, inventory_file, facts_env)
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
-def run_upgrade_playbook():
+def run_upgrade_playbook(verbose=False):
+ # TODO: do not hardcode the upgrade playbook, add ability to select the
+ # right playbook depending on the type of upgrade.
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
- 'playbooks/adhoc/upgrades/upgrade.yml')
+ 'playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml')
# TODO: Upgrade inventory for upgrade?
inventory_file = generate_inventory(CFG.hosts)
facts_env = os.environ.copy()
@@ -170,5 +175,5 @@ def run_upgrade_playbook():
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(playbook, inventory_file, facts_env)
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
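The recurring pattern in openshift_ansible.py is a `verbose` flag that simply prepends `-v` to the ansible-playbook invocation, while the environment (log path, optional ANSIBLE_CONFIG) is passed through unchanged. A small sketch of that argument construction; the paths are placeholders and the command is printed rather than executed.

```
# Sketch of the verbose-flag pattern applied across this module: '-v' is added
# to the ansible-playbook command line when requested. Paths are placeholders,
# and the command is only printed here instead of run via subprocess.call.
import os

def build_ansible_args(playbook, inventory, verbose=False):
    args = ['ansible-playbook', '-v'] if verbose else ['ansible-playbook']
    args.extend(['--inventory-file={}'.format(inventory), playbook])
    return args

env = os.environ.copy()
env['ANSIBLE_LOG_PATH'] = '/tmp/ansible.log'  # mirrors the settings wired up above

print(build_ansible_args('playbooks/byo/config.yml', '/tmp/hosts', verbose=True))
# -> ['ansible-playbook', '-v', '--inventory-file=/tmp/hosts', 'playbooks/byo/config.yml']
```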
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index b183f0acb..fc16d9ceb 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -46,18 +46,21 @@ SAMPLE_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- - ip: 10.0.0.1
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- - ip: 10.0.0.3
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
@@ -95,6 +98,76 @@ class OOCliFixture(OOInstallFixture):
f.close()
return config
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ print written_config['hosts']
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue(h['node'])
+ self.assertTrue('ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('public_hostname' in h)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = self._read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
class UnattendedCliTests(OOCliFixture):
@@ -102,6 +175,92 @@ class UnattendedCliTests(OOCliFixture):
OOCliFixture.setUp(self)
self.cli_args.append("-u")
+ # unattended with config file and all installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ if result.exception is None or result.exit_code != 1:
+ print "Exit code: %s" % result.exit_code
+ self.fail("Unexpected CLI return")
+
+ # unattended with config file and all installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=False)
+
+ # unattended with config file and no installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and some installed some uninstalled hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+ # unattended with config file and some installed some uninstalled hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
@@ -329,7 +488,7 @@ class AttendedCliTests(OOCliFixture):
for (host, is_master) in hosts:
inputs.append(host)
inputs.append('y' if is_master else 'n')
- inputs.append('rpm')
+ #inputs.append('rpm')
if i < len(hosts) - 1:
inputs.append('y') # Add more hosts
else:
@@ -346,7 +505,7 @@ class AttendedCliTests(OOCliFixture):
for (host, is_master) in add_nodes:
inputs.append(host)
inputs.append('y' if is_master else 'n')
- inputs.append('rpm')
+ #inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
else:
@@ -360,35 +519,6 @@ class AttendedCliTests(OOCliFixture):
return '\n'.join(inputs)
- def _verify_load_facts(self, load_facts_mock):
- """ Check that we ran load facts with expected inputs. """
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
- def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
- """ Check that we ran playbook with expected inputs. """
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
- def _verify_config_hosts(self, written_config, host_count):
- self.assertEquals(host_count, len(written_config['hosts']))
- for h in written_config['hosts']:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
- self.assertTrue('hostname' in h)
- self.assertTrue('public_ip' in h)
- self.assertTrue('public_hostname' in h)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
@@ -413,6 +543,7 @@ class AttendedCliTests(OOCliFixture):
written_config = self._read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
+ # interactive with config file and some installed some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_add_nodes(self, load_facts_mock, run_playbook_mock):
@@ -469,6 +600,29 @@ class AttendedCliTests(OOCliFixture):
written_config = self._read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
+ #interactive with config file and all installed hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ],
+ add_nodes=[('10.0.0.2', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+ run_playbook_mock,
+ cli_input,
+ exp_hosts_len=2,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes
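The new tests drive `get_hosts_to_run_on` by deep-copying a MOCK_FACTS fixture keyed by each host's connect_to address and marking selected hosts as already installed via a version string. A minimal sketch of that fixture convention; the fact values here are invented for illustration and are not the test module's actual MOCK_FACTS.

```
# Sketch of the mock-facts convention used by the new tests: facts are keyed by
# connect_to, and a host counts as already installed once it carries a version.
# The fixture values below are illustrative, not the test module's MOCK_FACTS.
import copy

MOCK_FACTS = {
    '10.0.0.1': {'common': {'ip': '10.0.0.1'}},
    '10.0.0.2': {'common': {'ip': '10.0.0.2'}},
}

mock_facts = copy.deepcopy(MOCK_FACTS)                  # keep the shared fixture pristine
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"   # pretend this host is installed

installed = [addr for addr, facts in mock_facts.items()
             if facts.get('common', {}).get('version', '') not in ('', 'None')]
print(sorted(installed))  # -> ['10.0.0.1']
```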
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 6dc335a0e..0dd4a30e9 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -14,18 +14,21 @@ SAMPLE_CONFIG = """
variant: openshift-enterprise
ansible_ssh_user: root
hosts:
- - ip: 10.0.0.1
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- - ip: 10.0.0.3
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
@@ -54,16 +57,19 @@ validated_facts:
CONFIG_INCOMPLETE_FACTS = """
hosts:
- - ip: 10.0.0.1
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
- - ip: 10.0.0.2
- hostname: node1-private.example.com
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
public_ip: 24.222.0.2
node: true
- - ip: 10.0.0.3
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
node: true
"""
@@ -145,7 +151,7 @@ class OOConfigTests(OOInstallFixture):
ooconfig = OOConfig(cfg_path)
self.assertEquals(3, len(ooconfig.hosts))
- self.assertEquals("10.0.0.1", ooconfig.hosts[0].name)
+ self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to)
self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)