28 files changed, 713 insertions, 165 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 92f545b25..6046a1a86 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.6-1 ./
+3.0.7-1 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a57b0f895..dfd9a111e 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
 from ansible import errors
 from operator import itemgetter
+import OpenSSL.crypto
+import os.path
 import pdb
 import re
 import json
@@ -327,6 +329,68 @@ class FilterModule(object):
         return revamped_outputs
 
+    @staticmethod
+    # pylint: disable=too-many-branches
+    def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+        ''' Parses names from list of certificate hashes.
+
+            Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+                                  "keyfile": "/etc/origin/master/custom1.key" },
+                                { "certfile": "custom2.crt",
+                                  "keyfile": "custom2.key" }]
+
+                returns [{ "certfile": "/etc/origin/master/custom1.crt",
+                           "keyfile": "/etc/origin/master/custom1.key",
+                           "names": [ "public-master-host.com",
+                                      "other-master-host.com" ] },
+                         { "certfile": "/etc/origin/master/custom2.crt",
+                           "keyfile": "/etc/origin/master/custom2.key",
+                           "names": [ "some-hostname.com" ] }]
+        '''
+        if not issubclass(type(certificates), list):
+            raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+        if not issubclass(type(data_dir), unicode):
+            raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+        if not issubclass(type(internal_hostnames), list):
+            raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+        for certificate in certificates:
+            if 'names' in certificate.keys():
+                continue
+            else:
+                certificate['names'] = []
+
+            if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                # Unable to find cert/key, try to prepend data_dir to paths
+                certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
+                certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
+                if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                    # Unable to find cert/key in data_dir
+                    raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+                                                    (certificate['certfile'], certificate['keyfile']))
+
+            try:
+                st_cert = open(certificate['certfile'], 'rt').read()
+                cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+                certificate['names'].append(str(cert.get_subject().commonName.decode()))
+                for i in range(cert.get_extension_count()):
+                    if cert.get_extension(i).get_short_name() == 'subjectAltName':
+                        for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+                            certificate['names'].append(name)
+            except:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+                                                 "please specify certificate names in host inventory"))
+
+            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+            certificate['names'] = list(set(certificate['names']))
+            if not certificate['names']:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+                                                 "detected a collision with internal hostname, please specify " +
+                                                 "certificate names in host inventory"))
+        return certificates
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -342,5 +406,6 @@ class FilterModule(object):
             "oo_combine_dict": self.oo_combine_dict,
             "oo_split": self.oo_split,
             "oo_filter_list": self.oo_filter_list,
-            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+            "oo_parse_certificate_names": self.oo_parse_certificate_names
         }
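The name extraction in the new `oo_parse_certificate_names` filter boils down to reading the subject CN and any `subjectAltName` DNS entries with pyOpenSSL. A minimal standalone sketch of just that step (the path and resulting hostnames are illustrative, echoing the docstring above):

```
import OpenSSL.crypto

def certificate_names(pem_path):
    """Collect the subject CN plus any subjectAltName DNS entries."""
    with open(pem_path, 'rt') as pem:
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                               pem.read())
    names = [str(cert.get_subject().commonName)]
    for i in range(cert.get_extension_count()):
        ext = cert.get_extension(i)
        # get_short_name() returns bytes under Python 3
        if ext.get_short_name() in ('subjectAltName', b'subjectAltName'):
            # str(ext) renders the extension as "DNS:host1, DNS:host2"
            names.extend(str(ext).replace('DNS:', '').split(', '))
    return sorted(set(names))

# certificate_names('/etc/origin/master/custom1.crt')
# -> ['other-master-host.com', 'public-master-host.com']
```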
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ad19fe116..f60918e6d 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -99,6 +99,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # set RPM version for debugging purposes
 #openshift_pkg_version=-3.0.0.0
 
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 8ea9120f2..df3418278 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
 }
 
 Name:           openshift-ansible
-Version:        3.0.6
+Version:        3.0.7
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -170,6 +170,9 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
 %package playbooks
 Summary:       Openshift and Atomic Enterprise Ansible Playbooks
 Requires:      %{name}
+Requires:      %{name}-roles
+Requires:      %{name}-lookup-plugins
+Requires:      %{name}-filter-plugins
 BuildArch:     noarch
 
 %description playbooks
@@ -185,6 +188,8 @@ BuildArch:     noarch
 %package roles
 Summary:       Openshift and Atomic Enterprise Ansible roles
 Requires:      %{name}
+Requires:      %{name}-lookup-plugins
+Requires:      %{name}-filter-plugins
 BuildArch:     noarch
 
 %description roles
@@ -249,6 +254,45 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+  (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+  (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+  (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+  warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+  (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+  (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+  (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+  (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
 * Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
 - Adding python-boto and python-libcloud to openshift-ansible-inventory
   dependency (kwoodson@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0503b7cd4..e05ab43f8 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -45,6 +45,7 @@
         - origin-master-api
         - origin-master-controllers
         - origin-node
+        - pcsd
 
     - yum: name={{ item }} state=absent
       when: not is_atomic | bool
@@ -59,6 +60,7 @@
         - atomic-openshift-node
         - atomic-openshift-sdn-ovs
         - etcd
+        - corosync
         - openshift
         - openshift-master
         - openshift-node
@@ -69,6 +71,8 @@
         - origin-master
         - origin-node
         - origin-sdn-ovs
+        - pacemaker
+        - pcs
         - tuned-profiles-atomic-enterprise-node
         - tuned-profiles-atomic-openshift-node
         - tuned-profiles-openshift-node
@@ -136,8 +140,10 @@
 
     - file: path={{ item }} state=absent
       with_items:
+        - "~{{ ansible_ssh_user }}/.kube"
         - /etc/ansible/facts.d/openshift.fact
         - /etc/atomic-enterprise
+        - /etc/corosync
         - /etc/etcd
         - /etc/openshift
         - /etc/openshift-sdn
@@ -151,9 +157,13 @@
         - /etc/sysconfig/origin-master
         - /etc/sysconfig/origin-node
         - /root/.kube
-        - "~{{ ansible_ssh_user }}/.kube"
+        - /run/openshift-sdn
         - /usr/share/openshift/examples
         - /var/lib/atomic-enterprise
         - /var/lib/etcd
         - /var/lib/openshift
        - /var/lib/origin
+        - /var/lib/pacemaker
+
+    - name: restart docker
+      service: name=docker state=restarted
diff --git a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..60f4fd8b8
--- /dev/null
+++ b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import shutil
+import yaml
+
+from datetime import datetime
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+def get_cfg_dir():
+    """Return the correct config directory to use."""
+    cfg_path = '/etc/origin/'
+    if not os.path.exists(cfg_path):
+        cfg_path = '/etc/openshift/'
+    return cfg_path
+
+
+def upgrade_master_3_0_to_3_1(backup):
+    """Main upgrade method for 3.0 to 3.1."""
+    changed = False
+
+    # Facts do not get transferred to the hosts where custom modules run,
+    # need to make some assumptions here.
+    master_config = os.path.join(get_cfg_dir(), 'master/master-config.yaml')
+
+    master_cfg_file = open(master_config, 'r')
+    config = yaml.safe_load(master_cfg_file.read())
+    master_cfg_file.close()
+
+    # Remove v1beta3 from apiLevels:
+    if 'apiLevels' in config and \
+        'v1beta3' in config['apiLevels']:
+        config['apiLevels'].remove('v1beta3')
+        changed = True
+    if 'apiLevels' in config['kubernetesMasterConfig'] and \
+        'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
+        config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
+        changed = True
+
+    # Add the new master proxy client certs:
+    # TODO: re-enable this once these certs are generated during upgrade:
+#    if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+#        config['kubernetesMasterConfig']['proxyClientInfo'] = {
+#            'certFile': 'master.proxy-client.crt',
+#            'keyFile': 'master.proxy-client.key'
+#       }
+
+    if changed:
+        if backup:
+            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+            basedir = os.path.split(master_config)[0]
+            backup_file = os.path.join(basedir, 'master-config.yaml.bak-%s'
+                                       % timestamp)
+            shutil.copyfile(master_config, backup_file)
+        # Write the modified config:
+        out_file = open(master_config, 'w')
+        out_file.write(yaml.safe_dump(config, default_flow_style=False))
+        out_file.close()
+
+    return changed
+
+
+def upgrade_master(from_version, to_version, backup):
+    """Upgrade entry point."""
+    if from_version == '3.0':
+        if to_version == '3.1':
+            return upgrade_master_3_0_to_3_1(backup)
+
+
+def main():
+    """ main """
+    # disabling pylint errors for global-variable-undefined and invalid-name
+    # for 'global module' usage, since it is required to use ansible_facts
+    # pylint: disable=global-variable-undefined, invalid-name
+    global module
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            from_version=dict(required=True, choices=['3.0']),
+            to_version=dict(required=True, choices=['3.1']),
+            role=dict(required=True, choices=['master']),
+            backup=dict(required=False, default=True, type='bool')
+        ),
+        supports_check_mode=True,
+    )
+
+    from_version = module.params['from_version']
+    to_version = module.params['to_version']
+    role = module.params['role']
+    backup = module.params['backup']
+
+    changed = False
+    if role == 'master':
+        changed = upgrade_master(from_version, to_version, backup)
+
+    return module.exit_json(changed=changed)
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
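The heart of `upgrade_master_3_0_to_3_1` is a YAML round-trip that drops `v1beta3` from both `apiLevels` lists and writes the file back. A quick self-contained illustration of that transformation (the sample config is invented, not from the patch):

```
import yaml

SAMPLE = """
apiLevels:
- v1beta3
- v1
kubernetesMasterConfig:
  apiLevels:
  - v1beta3
  - v1
"""

config = yaml.safe_load(SAMPLE)
for section in (config, config.get('kubernetesMasterConfig', {})):
    levels = section.get('apiLevels', [])
    if 'v1beta3' in levels:
        levels.remove('v1beta3')  # mirrors the module's removal logic

print(yaml.safe_dump(config, default_flow_style=False))
# apiLevels:
# - v1
# kubernetesMasterConfig:
#   apiLevels:
#   - v1
```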
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index ae1d0127c..09f991b1d 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,57 @@
 ---
+- name: Update deployment type
+  hosts: OSEv3
+  roles:
+  - openshift_facts
+  post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
+  - openshift_facts:
+      role: common
+      local_facts:
+        deployment_type: "{{ deployment_type }}"
+
+- name: Verify upgrade can proceed
+  hosts: masters
+  tasks:
+  # Checking the global deployment type rather than host facts, this is about
+  # what the user is requesting.
+    - fail: msg="Deployment type enterprise not supported for upgrade"
+      when: deployment_type == "enterprise"
+
+- name: Backup etcd
+  hosts: masters
+  vars:
+    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True
+  - name: Check available disk space for etcd backup
+    # We assume to be using the data dir for all backups.
+    shell: >
+      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  - name: Check current embedded etcd disk usage
+    shell: >
+      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+  - name: Install etcd (for etcdctl)
+    yum: pkg=etcd state=latest
+  - name: Generate etcd backup
+    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+  - name: Display location of etcd backup
+    debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
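The backup play guards against filling the disk: it compares `du` of the etcd data dir against the free space `df` reports for the data dir, both in 1K blocks, before calling `etcdctl backup`. A rough Python equivalent of that guard (using `du -s` for the same total; the function name is ours):

```
import subprocess

def assert_space_for_backup(data_dir, etcd_data_dir):
    """Mirror the playbook's guard: compare du of etcd against df free space (KiB)."""
    df_out = subprocess.check_output(['df', '--output=avail', '-k', data_dir]).decode()
    avail = int(df_out.split()[-1])   # last field: free 1K blocks
    du_out = subprocess.check_output(['du', '-sk', etcd_data_dir]).decode()
    used = int(du_out.split()[0])     # first field: used 1K blocks
    if used > avail:
        raise RuntimeError('%d Kb disk space required for etcd backup, '
                           '%d Kb available' % (used, avail))
```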
 - name: Upgrade base package on masters
   hosts: masters
   roles:
@@ -9,22 +62,58 @@
     - name: Upgrade base package
       yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=latest
 
-- name: Re-Run cluster configuration to apply latest configuration changes
-  include: ../../common/openshift-cluster/config.yml
+- name: Evaluate oo_first_master
+  hosts: localhost
   vars:
-    g_etcd_group: "{{ 'etcd' }}"
     g_masters_group: "{{ 'masters' }}"
-    g_nodes_group: "{{ 'nodes' }}"
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_deployment_type: "{{ deployment_type }}"
+  tasks:
+    - name: Evaluate oo_first_master
+      add_host:
+        name: "{{ groups[g_masters_group][0] }}"
+        groups: oo_first_master
+        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+        ansible_sudo: "{{ g_sudo | default(omit) }}"
+      when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+# TODO: ideally we would check the new version, without installing it. (some
+# kind of yum repoquery? would need to handle openshift -> atomic-openshift
+# package rename)
+- name: Perform upgrade version checking
+  hosts: oo_first_master
+  tasks:
+    - name: Determine new version
+      command: >
+        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
+      register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+  hosts: oo_first_master
+  tasks:
+    fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Verify upgrade can proceed
+  hosts: oo_first_master
+  tasks:
+  # Checking the global deployment type rather than host facts, this is about
+  # what the user is requesting.
+  - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
+    when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
 
 - name: Upgrade masters
   hosts: masters
   vars:
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   tasks:
+    - name: Upgrade to latest available kernel
+      yum: pkg=kernel state=latest
+    - name: display just the deployment_type variable for the current host
+      debug:
+        var: hostvars[inventory_hostname]
     - name: Upgrade master packages
-      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+      command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+    - name: Upgrade master configuration.
+      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
     - name: Restart master services
       service: name="{{ openshift.common.service_type}}-master" state=restarted
@@ -32,26 +121,14 @@
   hosts: nodes
   vars:
     openshift_version: "{{ openshift_pkg_version | default('') }}"
+  roles:
+  - openshift_facts
   tasks:
     - name: Upgrade node packages
-      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+      command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
     - name: Restart node services
       service: name="{{ openshift.common.service_type }}-node" state=restarted
 
-- name: Determine new master version
-  hosts: oo_first_master
-  tasks:
-    - name: Determine new version
-      command: >
-        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
-      register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
-  hosts: oo_first_master
-  tasks:
-    fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
 - name: Update cluster policy
   hosts: oo_first_master
   tasks:
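The `version_compare` chain in the "Ensure AOS 3.0.2 or Origin 1.0.6" play is hard to read in Jinja form. Written out as plain Python, the gate it expresses is roughly this (`new_version` stands in for `_new_version.stdout`):

```
from distutils.version import LooseVersion

def upgrade_too_old(new_version):
    """True when the target RPM is older than Origin 1.0.6 / AOS 3.0.2."""
    v = LooseVersion(new_version)
    return (v < LooseVersion('1.0.6')
            or (v >= LooseVersion('3.0') and v < LooseVersion('3.0.2')))

assert upgrade_too_old('1.0.5')
assert not upgrade_too_old('1.0.6')
assert upgrade_too_old('3.0.1')
assert not upgrade_too_old('3.0.2')
```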
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 786918929..09bf34666 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -11,7 +11,7 @@
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
      instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..57de7130b 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,68 +1,5 @@
 ---
-- name: Populate config host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: This playbook rquires g_etcd_group to be set
-    when: g_etcd_group is not defined
-
-  - fail:
-      msg: This playbook rquires g_masters_group to be set
-    when: g_masters_group is not defined
-
-  - fail:
-      msg: This playbook rquires g_nodes_group to be set
-    when: g_nodes_group is not defined
-
-  - name: Evaluate oo_etcd_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_etcd_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_etcd_group] | default([])
-
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_nodes_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-    when: g_nodeonmaster is defined and g_nodeonmaster == true
-
-  - name: Evaluate oo_first_etcd
-    add_host:
-      name: "{{ groups[g_etcd_group][0] }}"
-      groups: oo_first_etcd
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups[g_masters_group][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
 
 - include: ../openshift-etcd/config.yml
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..1919660dd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,64 @@
+---
+- name: Populate config host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: This playbook rquires g_etcd_group to be set
+    when: g_etcd_group is not defined
+
+  - fail:
+      msg: This playbook rquires g_masters_group to be set
+    when: g_masters_group is not defined
+
+  - fail:
+      msg: This playbook rquires g_nodes_group to be set
+    when: g_nodes_group is not defined
+
+  - name: Evaluate oo_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_etcd_group] | default([])
+
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_nodes_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+    when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+  - name: Evaluate oo_first_etcd
+    add_host:
+      name: "{{ groups[g_etcd_group][0] }}"
+      groups: oo_first_etcd
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups[g_masters_group][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..201320de8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -0,0 +1,7 @@
+---
+- include: evaluate_groups.yml
+
+- include: ../openshift-node/config.yml
+  vars:
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1dec923fc..59c4b2370 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -199,9 +199,18 @@
       validate_checksum: yes
     with_items: masters_needing_certs
 
+- name: Inspect named certificates
+  hosts: oo_first_master
+  tasks:
+  - name: Collect certificate names
+    set_fact:
+      parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+    when: openshift_master_named_certificates is defined
+
 - name: Configure master instances
   hosts: oo_masters_to_config
   vars:
+    named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
     embedded_etcd: "{{ openshift.master.embedded_etcd }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index c22b897d5..8be5d53e7 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -9,7 +9,7 @@
   - fail: msg="Deployment type not supported for gce provider yet"
     when: deployment_type == 'enterprise'
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index d3e768de5..8d7949dd1 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -17,7 +17,7 @@
 
   - include: tasks/configure_libvirt.yml
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index bce6a8745..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,7 +1,11 @@
 ---
 - yum:
-    name: openshift-ansible-inventory
+    name: "{{ item }}"
     state: present
+  with_items:
+  - openshift-ansible-inventory
+  - openshift-ansible-inventory-aws
+  - openshift-ansible-inventory-gce
 
 - name:
   copy:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 19857cfd2..23016fe31 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -484,12 +484,16 @@ def set_aggregate_facts(facts):
             dict: the facts dict updated with aggregated facts
     """
     all_hostnames = set()
+    internal_hostnames = set()
     if 'common' in facts:
         all_hostnames.add(facts['common']['hostname'])
         all_hostnames.add(facts['common']['public_hostname'])
         all_hostnames.add(facts['common']['ip'])
         all_hostnames.add(facts['common']['public_ip'])
+        internal_hostnames.add(facts['common']['hostname'])
+        internal_hostnames.add(facts['common']['ip'])
+
         if 'master' in facts:
             # FIXME: not sure why but facts['dns']['domain'] fails
             cluster_domain = 'cluster.local'
@@ -497,13 +501,23 @@ def set_aggregate_facts(facts):
                 all_hostnames.add(facts['master']['cluster_hostname'])
             if 'cluster_public_hostname' in facts['master']:
                 all_hostnames.add(facts['master']['cluster_public_hostname'])
-            all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
-                                  'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
-                                  'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+            all_hostnames.update(svc_names)
+            internal_hostnames.update(svc_names)
+
             first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
             all_hostnames.add(first_svc_ip)
+            internal_hostnames.add(first_svc_ip)
+
+            if facts['master']['embedded_etcd']:
+                facts['master']['etcd_data_dir'] = os.path.join(
+                    facts['common']['data_dir'], 'openshift.local.etcd')
+            else:
+                facts['master']['etcd_data_dir'] = '/var/lib/etcd'
 
         facts['common']['all_hostnames'] = list(all_hostnames)
+        facts['common']['internal_hostnames'] = list(all_hostnames)
 
     return facts
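`internal_hostnames` is meant to be the subset of `all_hostnames` that only resolves inside the cluster (node hostname, node IP, service names, first service IP); the certificate filter earlier in this commit uses it to discard cluster-internal names from a certificate's name list. A toy illustration of the relationship (sample values invented):

```
# Invented sample facts; the real sets are built from gathered host facts.
common = {'hostname': 'master1.internal', 'public_hostname': 'master.example.com',
          'ip': '10.0.0.1', 'public_ip': '24.222.0.1'}

internal_hostnames = {common['hostname'], common['ip']}
all_hostnames = internal_hostnames | {common['public_hostname'], common['public_ip']}

# Certificate names that are NOT cluster-internal are the ones worth serving:
cert_names = ['master.example.com', 'master1.internal']
print([n for n in cert_names if n not in internal_hostnames])
# ['master.example.com']
```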
name }}" +{% endfor %} +{% endfor %} +{% endif %} diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 4b39b043a..e966e793e 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -34,9 +34,6 @@      - serviceaccounts.private.key      - serviceaccounts.public.key -- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}" -  with_items: masters_needing_certs -  - name: Create the master certificates if they do not already exist    command: >      {{ openshift.common.admin_binary }} create-master-certs diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml index d494f1bad..fbc20cd63 100644 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -224,6 +224,14 @@ g_template_os_linux:      applications:      - Disk +  - discoveryrule_key: disc.disk +    name: "Percent Utilized for disk {#OSO_DISK}" +    key: "disc.disk.putil[{#OSO_DISK}]" +    value_type: float +    description: "PCP disk.dev.avactive metric measured over a period of time.  This is the '%util' in the iostat command" +    applications: +    - Disk +    ztriggerprototypes:    - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'      expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85' diff --git a/utils/docs/config.md b/utils/docs/config.md index 9399409dd..ee4b157c9 100644 --- a/utils/docs/config.md +++ b/utils/docs/config.md @@ -7,6 +7,7 @@ The default location this config file will be written to ~/.config/openshift/ins  ## Example  ``` +version: v1  variant: openshift-enterprise  variant_version: 3.0  ansible_ssh_user: root @@ -32,6 +33,10 @@ hosts:  ## Primary Settings +### version + +Indicates the version of configuration this file was written with. Current implementation is v1. +  ### variant  The OpenShift variant to install. Currently valid options are: diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index d3ee8c51e..8bee99f90 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -331,7 +331,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):      # Check if master or nodes already have something installed      installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)      if len(installed_hosts) > 0: -        # present a message listing already installed hosts +        click.echo('Installed environment detected.') +        # This check has to happen before we start removing hosts later in this method +        if not force: +            if not unattended: +                click.echo('By default the installer only adds new nodes to an installed environment.') +                response = click.prompt('Do you want to (1) only add additional nodes or ' \ +                                        '(2) perform a clean install?', type=int) +                # TODO: this should be reworked with error handling. +                # Click can certainly do this for us. +                # This should be refactored as soon as we add a 3rd option. 
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 4b39b043a..e966e793e 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -34,9 +34,6 @@
     - serviceaccounts.private.key
     - serviceaccounts.public.key
 
-- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}"
-  with_items: masters_needing_certs
-
 - name: Create the master certificates if they do not already exist
   command: >
     {{ openshift.common.admin_binary }} create-master-certs
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index d494f1bad..fbc20cd63 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -224,6 +224,14 @@ g_template_os_linux:
     applications:
     - Disk
 
+  - discoveryrule_key: disc.disk
+    name: "Percent Utilized for disk {#OSO_DISK}"
+    key: "disc.disk.putil[{#OSO_DISK}]"
+    value_type: float
+    description: "PCP disk.dev.avactive metric measured over a period of time.  This is the '%util' in the iostat command"
+    applications:
+    - Disk
+
   ztriggerprototypes:
   - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
     expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 9399409dd..ee4b157c9 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -7,6 +7,7 @@ The default location this config file will be written to ~/.config/openshift/ins
 ## Example
 
 ```
+version: v1
 variant: openshift-enterprise
 variant_version: 3.0
 ansible_ssh_user: root
@@ -32,6 +33,10 @@ hosts:
 
 ## Primary Settings
 
+### version
+
+Indicates the version of configuration this file was written with. Current implementation is v1.
+
 ### variant
 
 The OpenShift variant to install. Currently valid options are:
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d3ee8c51e..8bee99f90 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -331,7 +331,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
     # Check if master or nodes already have something installed
     installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
     if len(installed_hosts) > 0:
-        # present a message listing already installed hosts
+        click.echo('Installed environment detected.')
+        # This check has to happen before we start removing hosts later in this method
+        if not force:
+            if not unattended:
+                click.echo('By default the installer only adds new nodes to an installed environment.')
+                response = click.prompt('Do you want to (1) only add additional nodes or ' \
+                                        '(2) perform a clean install?', type=int)
+                # TODO: this should be reworked with error handling.
+                # Click can certainly do this for us.
+                # This should be refactored as soon as we add a 3rd option.
+                if response == 1:
+                    force = False
+                if response == 2:
+                    force = True
+
+        # present a message listing already installed hosts and remove hosts if needed
         for host in installed_hosts:
             if host.master:
                 click.echo("{} is already an OpenShift Master".format(host))
" \ +                                   "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) +                        sys.exit(1) +                else: +                    pass # proceeding as normal should do a clean install      return hosts_to_run_on, callback_facts @@ -385,7 +410,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):                                dir_okay=True,                                readable=True),                # callback=validate_ansible_dir, -              default='/usr/share/ansible/openshift-ansible/', +              default=DEFAULT_PLAYBOOK_DIR,                envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')  @click.option('--ansible-config',      type=click.Path(file_okay=True, @@ -459,6 +484,43 @@ def uninstall(ctx):      openshift_ansible.run_uninstall_playbook() +@click.command() +@click.pass_context +def upgrade(ctx): +    oo_cfg = ctx.obj['oo_cfg'] + +    if len(oo_cfg.hosts) == 0: +        click.echo("No hosts defined in: %s" % oo_cfg['configuration']) +        sys.exit(1) + +    # Update config to reflect the version we're targetting, we'll write +    # to disk once ansible completes successfully, not before. +    old_variant = oo_cfg.settings['variant'] +    old_version = oo_cfg.settings['variant_version'] +    if oo_cfg.settings['variant'] == 'enterprise': +        oo_cfg.settings['variant'] = 'openshift-enterprise' +    version = find_variant(oo_cfg.settings['variant'])[1] +    oo_cfg.settings['variant_version'] = version.name +    click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % ( +        old_variant, old_version, oo_cfg.settings['variant'], +        oo_cfg.settings['variant_version'])) +    for host in oo_cfg.hosts: +        click.echo("  * %s" % host.name) + +    if not ctx.obj['unattended']: +        # Prompt interactively to confirm: +        proceed = click.confirm("\nDo you wish to proceed?") +        if not proceed: +            click.echo("Upgrade cancelled.") +            sys.exit(0) + +    retcode = openshift_ansible.run_upgrade_playbook() +    if retcode > 0: +        click.echo("Errors encountered during upgrade, please check %s." % +            oo_cfg.settings['ansible_log_path']) +    else: +        click.echo("Upgrade completed! 
 @click.command()
 @click.option('--force', '-f', is_flag=True, default=False)
@@ -523,6 +585,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
         click.pause()
 
 cli.add_command(install)
+cli.add_command(upgrade)
 cli.add_command(uninstall)
 
 if __name__ == '__main__':
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index a2f53cf78..4281947f1 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -12,6 +12,7 @@ PERSIST_SETTINGS = [
     'ansible_log_path',
     'variant',
     'variant_version',
+    'version',
     ]
 
 REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
@@ -73,7 +74,6 @@ class Host(object):
 
 class OOConfig(object):
-    new_config = True
     default_dir = os.path.normpath(
         os.environ.get('XDG_CONFIG_HOME',
                        os.environ['HOME'] + '/.config/') + '/openshift/')
@@ -86,19 +86,22 @@ class OOConfig(object):
             self.config_path = os.path.normpath(self.default_dir +
                                                 self.default_file)
         self.settings = {}
-        self.read_config()
-        self.set_defaults()
+        self._read_config()
+        self._set_defaults()
 
-    def read_config(self, is_new=False):
+    def _read_config(self):
         self.hosts = []
         try:
-            new_settings = None
             if os.path.exists(self.config_path):
                 cfgfile = open(self.config_path, 'r')
-                new_settings = yaml.safe_load(cfgfile.read())
+                self.settings = yaml.safe_load(cfgfile.read())
                 cfgfile.close()
-            if new_settings:
-                self.settings = new_settings
+
+                # Use the presence of a Description as an indicator this is
+                # a legacy config file:
+                if 'Description' in self.settings:
+                    self._upgrade_legacy_config()
+
                 # Parse the hosts into DTO objects:
                 if 'hosts' in self.settings:
                     for host in self.settings['hosts']:
@@ -114,9 +117,28 @@ class OOConfig(object):
                                                                               ferr.strerror))
         except yaml.scanner.ScannerError:
             raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
-        self.new_config = is_new
 
-    def set_defaults(self):
+    def _upgrade_legacy_config(self):
+        new_hosts = []
+        if 'validated_facts' in self.settings:
+            for key, value in self.settings['validated_facts'].iteritems():
+                if 'masters' in self.settings and key in self.settings['masters']:
+                    value['master'] = True
+                if 'nodes' in self.settings and key in self.settings['nodes']:
+                    value['node'] = True
+                new_hosts.append(value)
+        self.settings['hosts'] = new_hosts
+
+        remove_settings = ['validated_facts', 'Description', 'Name',
+            'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+        for s in remove_settings:
+            del self.settings[s]
+
+        # A legacy config implies openshift-enterprise 3.0:
+        self.settings['variant'] = 'openshift-enterprise'
+        self.settings['variant_version'] = '3.0'
+
+    def _set_defaults(self):
         if 'ansible_inventory_directory' not in self.settings:
             self.settings['ansible_inventory_directory'] = \
@@ -125,6 +147,8 @@
             os.makedirs(self.settings['ansible_inventory_directory'])
         if 'ansible_plugins_directory' not in self.settings:
             self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+        if 'version' not in self.settings:
+            self.settings['version'] = 'v1'
        if 'ansible_callback_facts_yaml' not in self.settings:
             self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
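`_upgrade_legacy_config` folds the old top-level `masters`/`nodes`/`validated_facts` keys into the new per-host `hosts` list and drops the legacy metadata. Condensed to plain dictionaries, the transformation works like this (sample data mirrors the `LEGACY_CONFIG` test fixture further down in this diff; unlike the real method, this sketch also sets explicit `False` flags):

```
settings = {
    'masters': ['10.0.0.1'],
    'nodes': ['10.0.0.2'],
    'validated_facts': {
        '10.0.0.1': {'hostname': 'master-private.example.com', 'ip': '10.0.0.1'},
        '10.0.0.2': {'hostname': 'node1-private.example.com', 'ip': '10.0.0.2'},
    },
}

hosts = []
for key, value in settings['validated_facts'].items():
    value['master'] = key in settings.get('masters', [])
    value['node'] = key in settings.get('nodes', [])
    hosts.append(value)

# Legacy keys are dropped; a legacy file implies openshift-enterprise 3.0.
new_settings = {'hosts': hosts, 'variant': 'openshift-enterprise',
                'variant_version': '3.0'}
```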
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 0def72cfd..0648df0fa 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -16,10 +16,8 @@ def set_config(cfg):
     CFG = cfg
 
 def generate_inventory(hosts):
-    print hosts
     global CFG
 
-    installer_host = socket.gethostname()
     base_inventory_path = CFG.settings['ansible_inventory_path']
     base_inventory = open(base_inventory_path, 'w')
     base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
@@ -33,25 +31,18 @@ def generate_inventory(hosts):
         version=CFG.settings.get('variant_version', None))[1]
     base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
 
-    if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ:
-        base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:'
-            '5001/openshift3/ose-${component}:${version}\n')
-    if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ:
-        base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', "
+    if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+        base_inventory.write('cli_docker_additional_registries={}\n'
+          .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+    if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+        base_inventory.write('cli_docker_insecure_registries={}\n'
+          .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+    if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+        # We have to double the '{' here for literals
+        base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
             "'name': 'ose-devel', "
-            "'baseurl': 'http://buildvm-devops.usersys.redhat.com"
-            "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', "
-            "'enabled': 1, 'gpgcheck': 0}]\n")
-    if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
-        base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
-
-    if any(host.hostname == installer_host or host.public_hostname == installer_host
-            for host in hosts):
-        no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive'])
-        if no_pwd_sudo == 1:
-            print 'The atomic-openshift-installer requires sudo access without a password.'
-            sys.exit(1)
-        base_inventory.write("ansible_connection=local\n")
+            "'baseurl': '{}', "
+            "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
 
     base_inventory.write('\n[masters]\n')
     masters = (host for host in hosts if host.master)
@@ -73,6 +64,7 @@ def generate_inventory(hosts):
 
 def write_host(host, inventory, scheduleable=True):
     global CFG
+
     facts = ''
     if host.ip:
         facts += ' openshift_ip={}'.format(host.ip)
@@ -86,6 +78,16 @@ def write_host(host, inventory, scheduleable=True):
     # Technically only nodes will ever need this.
     if not scheduleable:
         facts += ' openshift_scheduleable=False'
+    installer_host = socket.gethostname()
+    if host.hostname == installer_host or host.public_hostname == installer_host:
+        facts += ' ansible_connection=local'
+        if os.geteuid() != 0:
+            no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+            if no_pwd_sudo == 1:
+                print 'The atomic-openshift-installer requires sudo access without a password.'
+                sys.exit(1)
+            facts += ' ansible_become=true'
+
     inventory.write('{} {}\n'.format(host, facts))
@@ -145,6 +147,7 @@ def run_ansible(playbook, inventory, env_vars):
                              playbook],
                              env=env_vars)
 
+
 def run_uninstall_playbook():
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
         'playbooks/adhoc/uninstall.yml')
@@ -155,3 +158,17 @@ def run_uninstall_playbook():
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
     return run_ansible(playbook, inventory_file, facts_env)
+
+
+def run_upgrade_playbook():
+    playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+        'playbooks/adhoc/upgrades/upgrade.yml')
+    # TODO: Upgrade inventory for upgrade?
+    inventory_file = generate_inventory(CFG.hosts)
+    facts_env = os.environ.copy()
+    if 'ansible_log_path' in CFG.settings:
+        facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+    if 'ansible_config' in CFG.settings:
+        facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+    return run_ansible(playbook, inventory_file, facts_env)
+
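The `{{`/`}}` doubling in the puddle-repo line (flagged by the "We have to double the '{' here for literals" comment) is ordinary `str.format` brace escaping. A one-liner shows why it is needed (example URL invented):

```
template = "openshift_additional_repos=[{{'id': 'ose-devel', 'baseurl': '{}'}}]"
print(template.format('http://example.com/repo'))
# openshift_additional_repos=[{'id': 'ose-devel', 'baseurl': 'http://example.com/repo'}]
```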
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 05281d654..3bb61dddb 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -29,6 +29,9 @@ class Variant(object):
 
         self.versions = versions
 
+    def latest_version(self):
+        return self.versions[-1]
+
 
 # WARNING: Keep the versions ordered, most recent last:
 OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
@@ -58,7 +61,7 @@ def find_variant(name, version=None):
     for prod in SUPPORTED_VARIANTS:
         if prod.name == name:
             if version is None:
-                return (prod, prod.versions[-1])
+                return (prod, prod.latest_version())
             for v in prod.versions:
                 if v.name == version:
                     return (prod, v)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 01af33fd9..6dc335a0e 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -32,6 +32,26 @@ hosts:
     node: true
 """
 
+# Used to test automatic upgrading of config:
+LEGACY_CONFIG = """
+Description: This is the configuration file for the OpenShift Ansible-Based Installer.
+Name: OpenShift Ansible-Based Installer Configuration
+Subscription: {type: none}
+Vendor: OpenShift Community
+Version: 0.0.1
+ansible_config: /tmp/notreal/ansible.cfg
+ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible
+ansible_log_path: /tmp/ansible.log
+ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins
+masters: [10.0.0.1]
+nodes: [10.0.0.2, 10.0.0.3]
+validated_facts:
+  10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1}
+  10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2}
+  10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3}
+"""
+
+
 CONFIG_INCOMPLETE_FACTS = """
 hosts:
   - ip: 10.0.0.1
@@ -74,6 +94,48 @@ class OOInstallFixture(unittest.TestCase):
 
         return path
 
+
+class LegacyOOConfigTests(OOInstallFixture):
+
+    def setUp(self):
+        OOInstallFixture.setUp(self)
+        self.cfg_path = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), LEGACY_CONFIG)
+        self.cfg = OOConfig(self.cfg_path)
+
+    def test_load_config_memory(self):
+        self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
+        self.assertEquals('3.0', self.cfg.settings['variant_version'])
+        self.assertEquals('v1', self.cfg.settings['version'])
+
+        self.assertEquals(3, len(self.cfg.hosts))
+        h1 = self.cfg.get_host('10.0.0.1')
+        self.assertEquals('10.0.0.1', h1.ip)
+        self.assertEquals('24.222.0.1', h1.public_ip)
+        self.assertEquals('master-private.example.com', h1.hostname)
+        self.assertEquals('master.example.com', h1.public_hostname)
+
+        h2 = self.cfg.get_host('10.0.0.2')
+        self.assertEquals('10.0.0.2', h2.ip)
+        self.assertEquals('24.222.0.2', h2.public_ip)
+        self.assertEquals('node1-private.example.com', h2.hostname)
+        self.assertEquals('node1.example.com', h2.public_hostname)
+
+        h3 = self.cfg.get_host('10.0.0.3')
+        self.assertEquals('10.0.0.3', h3.ip)
+        self.assertEquals('24.222.0.3', h3.public_ip)
+        self.assertEquals('node2-private.example.com', h3.hostname)
+        self.assertEquals('node2.example.com', h3.public_hostname)
+
+        self.assertFalse('masters' in self.cfg.settings)
+        self.assertFalse('nodes' in self.cfg.settings)
+        self.assertFalse('Description' in self.cfg.settings)
+        self.assertFalse('Name' in self.cfg.settings)
+        self.assertFalse('Subscription' in self.cfg.settings)
+        self.assertFalse('Vendor' in self.cfg.settings)
+        self.assertFalse('Version' in self.cfg.settings)
+        self.assertFalse('validates_facts' in self.cfg.settings)
+
+
 class OOConfigTests(OOInstallFixture):
 
     def test_load_config(self):
@@ -91,6 +153,7 @@ class OOConfigTests(OOInstallFixture):
                           [host['ip'] for host in ooconfig.settings['hosts']])
 
         self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+        self.assertEquals('v1', ooconfig.settings['version'])
 
     def test_load_complete_facts(self):
         cfg_path = self.write_config(os.path.join(self.work_dir,
@@ -128,6 +191,7 @@ class OOConfigTests(OOInstallFixture):
 
         self.assertTrue('ansible_ssh_user' in written_config)
         self.assertTrue('variant' in written_config)
+        self.assertEquals('v1', written_config['version'])
 
         # Some advanced settings should not get written out if they
         # were not specified by the user:
