author     Devan Goodwin <dgoodwin@redhat.com>    2015-11-05 08:35:53 -0400
committer  Jason DeTiberus <jdetiber@redhat.com>  2015-11-05 17:10:51 -0500
commit     fe4e9a4ca7028aa877fdd3895225a67b026aea11 (patch)
tree       a212bfbb7f443335ace471256e07cb25ab139c12
parent     c73ec7b6b27483aea4bb53db0db9837ff9781d24 (diff)
Upgrade improvements
- Push config dir logic out of the module and use host variables instead.
- Back up the master config with the Ansible backup utility.
- Add error handling to the upgrade config module.
- Add a verbose option to the installer.
- Return details on what was changed when upgrading config.
- Clean up use of the first master.
- Don't install upgrade RPMs just to check what version we'll upgrade to.
-rwxr-xr-x  playbooks/adhoc/upgrades/library/openshift_upgrade_config.py | 53
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml | 78
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 22
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 41
4 files changed, 93 insertions, 101 deletions
diff --git a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
index 60f4fd8b8..0894efa52 100755
--- a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
@@ -5,11 +5,8 @@
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
-import shutil
import yaml
-from datetime import datetime
-
DOCUMENTATION = '''
---
module: openshift_upgrade_config
@@ -20,21 +17,14 @@ requirements: [ ]
EXAMPLES = '''
'''
-def get_cfg_dir():
- """Return the correct config directory to use."""
- cfg_path = '/etc/origin/'
- if not os.path.exists(cfg_path):
- cfg_path = '/etc/openshift/'
- return cfg_path
-
-def upgrade_master_3_0_to_3_1(backup):
+def upgrade_master_3_0_to_3_1(module, config_base, backup):
"""Main upgrade method for 3.0 to 3.1."""
- changed = False
+ changes = []
# Facts do not get transferred to the hosts where custom modules run,
# need to make some assumptions here.
- master_config = os.path.join(get_cfg_dir(), 'master/master-config.yaml')
+ master_config = os.path.join(config_base, 'master/master-config.yaml')
master_cfg_file = open(master_config, 'r')
config = yaml.safe_load(master_cfg_file.read())
@@ -45,6 +35,7 @@ def upgrade_master_3_0_to_3_1(backup):
'v1beta3' in config['apiLevels']:
config['apiLevels'].remove('v1beta3')
changed = True
+ changes.append("master-config.yaml: removed v1beta3 from apiLevels")
if 'apiLevels' in config['kubernetesMasterConfig'] and \
'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
@@ -57,27 +48,26 @@ def upgrade_master_3_0_to_3_1(backup):
# 'certFile': 'master.proxy-client.crt',
# 'keyFile': 'master.proxy-client.key'
# }
+# changes.append("master-config.yaml: added proxyClientInfo")
- if changed:
+ if len(changes) > 0:
if backup:
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
- basedir = os.path.split(master_config)[0]
- backup_file = os.path.join(basedir, 'master-config.yaml.bak-%s'
- % timestamp)
- shutil.copyfile(master_config, backup_file)
+ # TODO: Check success:
+ module.backup_local(master_config)
+
# Write the modified config:
out_file = open(master_config, 'w')
out_file.write(yaml.safe_dump(config, default_flow_style=False))
out_file.close()
- return changed
+ return changes
-def upgrade_master(from_version, to_version, backup):
+def upgrade_master(module, config_base, from_version, to_version, backup):
"""Upgrade entry point."""
if from_version == '3.0':
if to_version == '3.1':
- return upgrade_master_3_0_to_3_1(backup)
+ return upgrade_master_3_0_to_3_1(module, config_base, backup)
def main():
@@ -89,6 +79,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
+ config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
to_version=dict(required=True, choices=['3.1']),
role=dict(required=True, choices=['master']),
@@ -101,12 +92,18 @@ def main():
to_version = module.params['to_version']
role = module.params['role']
backup = module.params['backup']
-
- changed = False
- if role == 'master':
- changed = upgrade_master(from_version, to_version, backup)
-
- return module.exit_json(changed=changed)
+ config_base = module.params['config_base']
+
+ try:
+ changes = []
+ if role == 'master':
+ changes = upgrade_master(module, config_base, from_version,
+ to_version, backup)
+
+ changed = len(changes) > 0
+ return module.exit_json(changed=changed, changes=changes)
+ except Exception, e:
+ return module.fail_json(msg=str(e))
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
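
The net effect of the hunks above is a new result contract for the module: it accumulates human-readable change descriptions instead of a boolean, derives "changed" from the list, backs the file up via AnsibleModule.backup_local(), and reports failures through fail_json. A minimal standalone sketch of that contract (assuming the modern module_utils import style rather than the wildcard import the module itself uses):

    # A sketch of the changes-list pattern; details of the real module differ.
    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                config_base=dict(required=True),
                backup=dict(type='bool', default=True),
            )
        )
        changes = []
        try:
            master_config = module.params['config_base'] + '/master/master-config.yaml'
            if module.params['backup']:
                # backup_local() copies the file aside and returns the backup path
                module.backup_local(master_config)
            # ... mutate the config here, appending one description per change ...
            module.exit_json(changed=len(changes) > 0, changes=changes)
        except Exception as e:
            module.fail_json(msg=str(e))

    if __name__ == '__main__':
        main()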
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index 09f991b1d..c113c7ab2 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,12 @@
---
+- name: Verify upgrade can proceed
+ hosts: masters
+ tasks:
+ # Checking the global deployment type rather than host facts, this is about
+ # what the user is requesting.
+ - fail: msg="Deployment type enterprise not supported for upgrade"
+ when: deployment_type == "enterprise"
+
- name: Update deployment type
hosts: OSEv3
roles:
@@ -9,14 +17,6 @@
local_facts:
deployment_type: "{{ deployment_type }}"
-- name: Verify upgrade can proceed
- hosts: masters
- tasks:
- # Checking the global deployment type rather than host facts, this is about
- # what the user is requesting.
- - fail: msg="Deployment type enterprise not supported for upgrade"
- when: deployment_type == "enterprise"
-
- name: Backup etcd
hosts: masters
vars:
@@ -52,48 +52,35 @@
- name: Display location of etcd backup
debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-- name: Upgrade base package on masters
- hosts: masters
- roles:
- - openshift_facts
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade base package
- yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
-
-- name: Evaluate oo_first_master
- hosts: localhost
- vars:
- g_masters_group: "{{ 'masters' }}"
- tasks:
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
-
-# TODO: ideally we would check the new version, without installing it. (some
-# kind of yum repoquery? would need to handle openshift -> atomic-openshift
-# package rename)
- name: Perform upgrade version checking
- hosts: oo_first_master
+ hosts: masters[0]
tasks:
- - name: Determine new version
+ - name: Determine available version
+ shell: >
+ yum list available {{ openshift.common.service_type }} | tail -n 1 | cut -f 2 -d " " | cut -f 1 -d "-"
+ register: _new_version
+ - debug: var=_new_version
+ # The above check will return nothing if the package is already installed,
+ # and we may be re-running upgrade due to a failure.
+ - name: Determine installed version
command: >
rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
register: _new_version
+ when: _new_version.stdout == ""
+ # Fail if we still don't know:
+ - debug: var=_new_version
+ - name: Verify upgrade version
+ fail: Unable to determine upgrade version for {{ openshift.common.service_type }}
+ when: _new_version.stdout == ""
- name: Ensure AOS 3.0.2 or Origin 1.0.6
- hosts: oo_first_master
+ hosts: masters[0]
tasks:
fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
- name: Verify upgrade can proceed
- hosts: oo_first_master
+ hosts: masters[0]
tasks:
# Checking the global deployment type rather than host facts, this is about
# what the user is requesting.
@@ -107,13 +94,10 @@
tasks:
- name: Upgrade to latest available kernel
yum: pkg=kernel state=latest
- - name: display just the deployment_type variable for the current host
- debug:
- var: hostvars[inventory_hostname]
- name: Upgrade master packages
command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
- name: Upgrade master configuration.
- openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
+ openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master config_base={{ hostvars[inventory_hostname].openshift.common.config_base }}
- name: Restart master services
service: name="{{ openshift.common.service_type}}-master" state=restarted
@@ -130,7 +114,7 @@
service: name="{{ openshift.common.service_type }}-node" state=restarted
- name: Update cluster policy
- hosts: oo_first_master
+ hosts: masters[0]
tasks:
- name: oadm policy reconcile-cluster-roles --confirm
command: >
@@ -138,7 +122,7 @@
policy reconcile-cluster-roles --confirm
- name: Update cluster policy bindings
- hosts: oo_first_master
+ hosts: masters[0]
tasks:
- name: oadm policy reconcile-cluster-role-bindings --confirm
command: >
@@ -151,7 +135,7 @@
when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
- name: Upgrade default router
- hosts: oo_first_master
+ hosts: masters[0]
vars:
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
@@ -189,7 +173,7 @@
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
- name: Upgrade default
- hosts: oo_first_master
+ hosts: masters[0]
vars:
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
@@ -207,7 +191,7 @@
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- name: Update image streams and templates
- hosts: oo_first_master
+ hosts: masters[0]
vars:
openshift_examples_import_command: "update"
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 8bee99f90..9f0861b77 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -323,7 +323,7 @@ def get_installed_hosts(hosts, callback_facts):
installed_hosts.append(host)
return installed_hosts
-def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
@@ -424,9 +424,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
writable=True,
readable=True),
default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
#pylint: disable=too-many-arguments
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
The main click CLI module. Responsible for handling most common CLI options,
assigning any defaults and adding to the context for the sub-commands.
@@ -436,6 +438,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
oo_cfg = OOConfig(ctx.obj['configuration'])
@@ -466,6 +469,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -481,13 +485,14 @@ def uninstall(ctx):
click.echo("Uninstall cancelled.")
sys.exit(0)
- openshift_ansible.run_uninstall_playbook()
+ openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -514,7 +519,7 @@ def upgrade(ctx):
click.echo("Upgrade cancelled.")
sys.exit(0)
- retcode = openshift_ansible.run_upgrade_playbook()
+ retcode = openshift_ansible.run_upgrade_playbook(verbose)
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
oo_cfg.settings['ansible_log_path'])
@@ -527,6 +532,7 @@ def upgrade(ctx):
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
@@ -534,13 +540,15 @@ def install(ctx, force):
oo_cfg = get_missing_info_from_user(oo_cfg)
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
- hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force)
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+ oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
@@ -562,7 +570,7 @@ If changes are needed to the values recorded by the installer please update {}.
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
- hosts_to_run_on)
+ hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 0648df0fa..153415e8c 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -91,16 +91,17 @@ def write_host(host, inventory, scheduleable=True):
inventory.write('{} {}\n'.format(host, facts))
-def load_system_facts(inventory_file, os_facts_path, env_vars):
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
FNULL = open(os.devnull, 'w')
- status = subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory_file),
- os_facts_path],
- env=env_vars,
- stdout=FNULL)
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
if not status == 0:
return [], 1
callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
@@ -109,7 +110,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars):
return callback_facts, 0
-def default_facts(hosts):
+def default_facts(hosts, verbose=False):
global CFG
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -121,10 +122,10 @@ def default_facts(hosts):
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return load_system_facts(inventory_file, os_facts_path, facts_env)
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
-def run_main_playbook(hosts, hosts_to_run_on):
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
global CFG
inventory_file = generate_inventory(hosts)
if len(hosts_to_run_on) != len(hosts):
@@ -138,17 +139,19 @@ def run_main_playbook(hosts, hosts_to_run_on):
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(main_playbook_path, inventory_file, facts_env)
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
-def run_ansible(playbook, inventory, env_vars):
- return subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory),
- playbook],
- env=env_vars)
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ return subprocess.call(args, env=env_vars)
-def run_uninstall_playbook():
+def run_uninstall_playbook(verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(CFG.hosts)
@@ -157,10 +160,10 @@ def run_uninstall_playbook():
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(playbook, inventory_file, facts_env)
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
-def run_upgrade_playbook():
+def run_upgrade_playbook(verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/upgrades/upgrade.yml')
# TODO: Upgrade inventory for upgrade?
@@ -170,5 +173,5 @@ def run_upgrade_playbook():
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(playbook, inventory_file, facts_env)
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
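
Each runner above repeats the same environment setup before deferring to run_ansible, which now prepends -v when requested. A condensed sketch of that shared pattern (the helper name and settings mapping are illustrative, not the repo's API):

    import os
    import subprocess

    def run_playbook(playbook, inventory, settings, verbose=False):
        env = os.environ.copy()
        env['ANSIBLE_LOG_PATH'] = settings.get('ansible_log_path', '/tmp/ansible.log')
        if 'ansible_config' in settings:
            env['ANSIBLE_CONFIG'] = settings['ansible_config']
        args = ['ansible-playbook'] + (['-v'] if verbose else [])
        args += ['--inventory-file={}'.format(inventory), playbook]
        return subprocess.call(args, env=env)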