From 2fe86fee9758471fd55de2776bf512ddf2a39b8d Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Thu, 7 Jan 2016 16:41:37 -0500
Subject: Always pull docker images

---
 roles/etcd/tasks/main.yml                | 16 ----------------
 roles/openshift_cli/tasks/main.yml       | 16 ----------------
 roles/openshift_master/tasks/main.yml    | 16 ----------------
 roles/openshift_master_ca/tasks/main.yml | 10 ++--------
 roles/openshift_node/tasks/main.yml      | 29 +----------------------------
 5 files changed, 3 insertions(+), 84 deletions(-)

diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index e83cfc33c..1e97b047b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -11,24 +11,8 @@
   action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
   when: not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull etcd container
   command: docker pull {{ openshift.etcd.etcd_image }}
-  when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
-
-- name: Wait for etcd image
-  command: >
-    docker images
-  register: docker_images
-  until: openshift.etcd.etcd_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
 
 - name: Install etcd container service file
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 8d7686ffd..ec77e4ebc 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -8,27 +8,11 @@
   yum: pkg={{ openshift.common.service_type }}-clients state=installed
   when: not openshift.common.is_containerized | bool
 
-- name: List Docker images
-  command: >
-    docker images
-  register: docker_images
-
 - name: Pull CLI Image
   command: >
     docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
-
-- name: Wait for CLI image
-  command: >
-    docker images
-  register: docker_images
-  until: openshift.common.cli_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
-
 
 - name: Create /usr/local/bin/openshift cli wrapper
   template:
     src: openshift.j2
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 397122631..3b46a0df4 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -86,25 +86,9 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present"
   when: not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull master image
   command: >
     docker pull {{ openshift.master.master_image }}
-  when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
-
-- name: Wait for master image
-  command: >
-    docker images
-  register: docker_images
-  until: openshift.master.master_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
 
 - name: Install Master docker service file
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5b4c92f2b..6d9be81c0 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -13,16 +13,10 @@
     path: "{{ openshift_master_config_dir }}"
     state: directory
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
-- name: Pull required docker image
+- name: Pull master docker image
   command: >
     docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+  when: openshift.common.is_containerized | bool
 
 - name: Create the master certificates if they do not already exist
   command: >
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 33852d7f8..0828d8e2c 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,41 +44,14 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
   when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull node image
   command: >
     docker pull {{ openshift.node.node_image }}
-  when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
-
-- name: Wait for node image
-  command: >
-    docker images
-  register: docker_images
-  until: openshift.node.node_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
- 
+
 - name: Pull OpenVSwitch image
   command: >
     docker pull {{ openshift.node.ovs_image }}
-  when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
-    and openshift.common.use_openshift_sdn | bool
-
-- name: Wait for OpenVSwitch image
-  command: >
-    docker images
-  register: docker_images
-  until: openshift.node.ovs_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
 - name: Install Node docker service file
--
cgit v1.2.3
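The pattern this patch deletes in every role checked `docker images` output before pulling, then polled until the image showed up. Roughly, in plain Python (a hypothetical sketch, not repo code):

```python
# Hypothetical before/after sketch of the task logic above. The substring
# test against `docker images` output is fragile (e.g. "etcd" also matches
# "etcd-tools"), while `docker pull` is safe to run unconditionally: it is
# a near no-op when the image is already current.
import subprocess

def pull_if_missing(image):          # pattern the patch removes
    listed = subprocess.check_output(['docker', 'images']).decode()
    if image not in listed:          # fragile substring match
        subprocess.check_call(['docker', 'pull', image])

def always_pull(image):              # pattern the patch adopts
    subprocess.check_call(['docker', 'pull', image])
```
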
From 81a8df590d40bb4fcc8902e0f9fb6a0406a0fa37 Mon Sep 17 00:00:00 2001
From: Samuel Munilla
Date: Wed, 6 Jan 2016 08:47:15 -0500
Subject: atomic-openshift-installer: Populate new_nodes group

Set the new_nodes group when scaling up additional nodes
---
 utils/src/ooinstall/cli_installer.py     |  9 +++++++--
 utils/src/ooinstall/oo_config.py         |  4 +++-
 utils/src/ooinstall/openshift_ansible.py | 13 +++++++++++--
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index c86ba2f4f..05adc7153 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -72,7 +72,7 @@ def delete_hosts(hosts):
             click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
     return hosts, None
 
-def collect_hosts(oo_cfg, masters_set=False, print_summary=True):
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
     """
         Collect host information from user. This will later be filled in using
        ansible.
@@ -139,6 +139,11 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         #     host_props['containerized'] = False
         host_props['containerized'] = False
 
+        if existing_env:
+            host_props['new_host'] = True
+        else:
+            host_props['new_host'] = False
+
         host = Host(**host_props)
 
         hosts.append(host)
@@ -507,7 +512,7 @@ def collect_new_nodes(oo_cfg):
     Add new nodes here
     """
     click.echo(message)
-    return collect_hosts(oo_cfg, masters_set=True, print_summary=False)
+    return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
 
 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 031b82bc1..33ab27567 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -38,6 +38,7 @@ class Host(object):
         self.public_hostname = kwargs.get('public_hostname', None)
         self.connect_to = kwargs.get('connect_to', None)
         self.preconfigured = kwargs.get('preconfigured', None)
+        self.new_host = kwargs.get('new_host', None)
 
         # Should this host run as an OpenShift master:
         self.master = kwargs.get('master', False)
@@ -68,7 +69,8 @@ class Host(object):
         """ Used when exporting to yaml. """
         d = {}
         for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
-                     'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
+                     'master', 'node', 'master_lb', 'containerized',
+                     'connect_to', 'preconfigured', 'new_host']:
             # If the property is defined (not None or False), export it:
             if getattr(self, prop):
                 d[prop] = getattr(self, prop)
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index fd2cd7fbd..20401f812 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -19,13 +19,15 @@ def generate_inventory(hosts):
     global CFG
     masters = [host for host in hosts if host.master]
     nodes = [host for host in hosts if host.node]
+    new_nodes = [host for host in hosts if host.node and host.new_host]
     proxy = determine_proxy_configuration(hosts)
     multiple_masters = len(masters) > 1
+    scaleup = len(new_nodes) > 0
 
     base_inventory_path = CFG.settings['ansible_inventory_path']
     base_inventory = open(base_inventory_path, 'w')
 
-    write_inventory_children(base_inventory, multiple_masters, proxy)
+    write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)
 
     write_inventory_vars(base_inventory, multiple_masters, proxy)
 
@@ -71,6 +73,11 @@ def generate_inventory(hosts):
         base_inventory.write('\n[lb]\n')
         write_host(proxy, base_inventory)
 
+    if scaleup:
+        base_inventory.write('\n[new_nodes]\n')
+        for node in new_nodes:
+            write_host(node, base_inventory)
+
    base_inventory.close()
    return base_inventory_path
 
@@ -84,12 +91,14 @@ def determine_proxy_configuration(hosts):
 
     return None
 
-def write_inventory_children(base_inventory, multiple_masters, proxy):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
     global CFG
 
     base_inventory.write('\n[OSEv3:children]\n')
     base_inventory.write('masters\n')
     base_inventory.write('nodes\n')
+    if scaleup:
+        base_inventory.write('new_nodes\n')
     if multiple_masters:
         base_inventory.write('etcd\n')
     if not getattr(proxy, 'preconfigured', True):
--
cgit v1.2.3
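For reference, the inventory this change emits during a scale-up gains a new_nodes child group. A condensed sketch (the helper name is made up for illustration; the group names come from the patch):

```python
# Condensed sketch of the INI inventory layout generate_inventory() produces
# on scale-up. write_scaleup_inventory() is a hypothetical name; the
# [OSEv3:children] and [new_nodes] sections mirror the patch above.
def write_scaleup_inventory(path, masters, nodes, new_nodes):
    with open(path, 'w') as inv:
        inv.write('[OSEv3:children]\nmasters\nnodes\n')
        if new_nodes:
            inv.write('new_nodes\n')   # consumed by the scaleup playbook
        for group, hosts in (('masters', masters), ('nodes', nodes),
                             ('new_nodes', new_nodes)):
            if hosts:
                inv.write('\n[%s]\n' % group)
                for host in hosts:
                    inv.write(host + '\n')
```
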
From 407a83b1ceaeb96791edc8424bfccf59cd66c9e0 Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Mon, 11 Jan 2016 09:28:35 -0500
Subject: Fixing tab completion for latest metadata changes

---
 bin/ossh_bash_completion | 12 ++++++------
 bin/ossh_zsh_completion  |  6 +++---
 bin/zsh_functions/_ossh  |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 440fa0a45..9ea8ad809 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     fi
 }
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
 
 __opssh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["oo_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_host-type" in host])'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["oo_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_host-type" in host])'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["oo_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_host-type" in host])'
 
     fi
 }
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index f9454357b..170ca889b 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index e34ca5bd4..65979c58a 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -2,7 +2,7 @@
 _ossh_known_hosts(){
   if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
 
   fi
 }
--
cgit v1.2.3
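The embedded python -c one-liners are hard to read in diff form. Unrolled into a readable function, the cache lookup they all perform is (illustrative rewrite, not a repo file):

```python
# The lookup the one-liners above perform, unrolled for readability.
# Illustrative only; the cache path and the oo_name/oo_environment keys
# are taken from the patch.
import json
import os

def known_hosts(cache='~/.ansible/tmp/multi_inventory.cache'):
    with open(os.path.expanduser(cache)) as handle:
        hostvars = json.load(handle)['_meta']['hostvars']
    return ['%s.%s' % (host['oo_name'], host['oo_environment'])
            for host in hostvars.values()
            if all(k in host for k in ('oo_name', 'oo_environment'))]
```
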
From 645becf849fecee13d3e0bfcf6f780b3ecdd10de Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Mon, 11 Jan 2016 09:28:35 -0500
Subject: Fixing tab completion for latest metadata changes

---
 bin/ossh_bash_completion | 12 ++++++------
 bin/ossh_zsh_completion  |  6 +++---
 bin/zsh_functions/_ossh  |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 440fa0a45..e4680cbcc 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     fi
 }
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
 
 __opssh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host])'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host])'
 
     fi
 }
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index f9454357b..170ca889b 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
    fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index e34ca5bd4..65979c58a 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -2,7 +2,7 @@
 _ossh_known_hosts(){
   if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
 
   fi
 }
--
cgit v1.2.3
"\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])') + print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } -- cgit v1.2.3 From 7c48bcc8a708828ff78a4b239eca6cf60531f5e2 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Mon, 11 Jan 2016 12:17:05 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.28-1]. --- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 6c9176ad0..271e86d20 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.27-1 ./ +3.0.28-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index d6f04adde..70938e8d2 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.27 +Version: 3.0.28 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,15 @@ Atomic OpenShift Utilities includes %changelog +* Mon Jan 11 2016 Kenny Woodson 3.0.28-1 +- added the rhe7-host-monitoring service file (mwoodson@redhat.com) +- Fixing tab completion for latest metadata changes (kwoodson@redhat.com) +- Removing some internal hostnames (bleanhar@redhat.com) +- Fixing tab completion for latest metadata changes (kwoodson@redhat.com) +- Make bin/cluster able to spawn OSE 3.1 clusters (lhuard@amadeus.com) +- oso_host_monitoring role: removed the f22 and zagg client, replaced it with + oso-rhel7-host-monitoring container (mwoodson@redhat.com) + * Fri Jan 08 2016 Kenny Woodson 3.0.27-1 - Update to metadata tooling. 
From 8be4562550db9285039738173607cce446979190 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Mon, 11 Jan 2016 13:02:46 -0500
Subject: Set the cli image to match osm_image in openshift_cli role

---
 roles/openshift_cli/tasks/main.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index ec77e4ebc..2b53c9b8e 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -3,6 +3,7 @@
     role: common
     local_facts:
       deployment_type: "{{ openshift_deployment_type }}"
+      cli_image: "{{ osm_image | default(None) }}"
 
 - name: Install clients
   yum: pkg={{ openshift.common.service_type }}-clients state=installed
--
cgit v1.2.3

From 4bc761ecebf2617780327b2681b057c97486ae80 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Mon, 11 Jan 2016 14:49:35 -0500
Subject: Set portal net in master playbook

---
 playbooks/common/openshift-master/config.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 677c274c4..4ecdf2a0c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -51,6 +51,7 @@
         console_url: "{{ openshift_master_console_url | default(None) }}"
         console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
         public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+        portal_net: "{{ openshift_master_portal_net | default(None) }}"
   - name: Check status of external etcd certificatees
     stat:
       path: "{{ openshift.common.config_base }}/master/{{ item }}"
--
cgit v1.2.3

From 1da2b5f6ed62329d6f5cdc1df90106e5c896ef1e Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Fri, 8 Jan 2016 13:11:52 -0500
Subject: re-enable containerize installs

---
 utils/src/ooinstall/cli_installer.py | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index c86ba2f4f..3c6de44cf 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -130,14 +130,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
             host_props['node'] = True
 
         #TODO: Reenable this option once container installs are out of tech preview
-        #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
-        #                                type=click.Choice(['rpm', 'container']),
-        #                                default='rpm')
-        #if rpm_or_container == 'container':
-        #    host_props['containerized'] = True
-        #else:
-        #    host_props['containerized'] = False
-        host_props['containerized'] = False
+        rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+                                        type=click.Choice(['rpm', 'container']),
+                                        default='rpm')
+        if rpm_or_container == 'container':
+            host_props['containerized'] = True
+        else:
+            host_props['containerized'] = False
 
         host = Host(**host_props)
--
cgit v1.2.3
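The re-enabled prompt leans on click's Choice type to constrain answers. A minimal standalone demo (not installer code):

```python
# Minimal demo of the click.Choice prompt pattern the installer re-enables.
import click

@click.command()
def ask():
    kind = click.prompt(
        'Will this host be RPM or Container based (rpm/container)?',
        type=click.Choice(['rpm', 'container']),
        default='rpm')
    # click re-prompts automatically on input outside the Choice list
    click.echo('containerized=%s' % (kind == 'container'))

if __name__ == '__main__':
    ask()
```
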
From dea9abfe22864cf10d85d85370b1633ca18060b6 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 14 Dec 2015 15:29:24 -0400
Subject: Implement simple master rolling restarts.

Blocks running ansible on a host that will be restarted. Can restart
just services, or optionally the full system.
---
 playbooks/byo/openshift-cluster/restart.yml    |  7 +++
 playbooks/common/openshift-cluster/restart.yml | 78 ++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)
 create mode 100644 playbooks/byo/openshift-cluster/restart.yml
 create mode 100644 playbooks/common/openshift-cluster/restart.yml

diff --git a/playbooks/byo/openshift-cluster/restart.yml b/playbooks/byo/openshift-cluster/restart.yml
new file mode 100644
index 000000000..da0da69a6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/restart.yml
@@ -0,0 +1,7 @@
+---
+- include: ../../common/openshift-cluster/restart.yml
+  vars:
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/restart.yml b/playbooks/common/openshift-cluster/restart.yml
new file mode 100644
index 000000000..4117f7297
--- /dev/null
+++ b/playbooks/common/openshift-cluster/restart.yml
@@ -0,0 +1,78 @@
+---
+- include: evaluate_groups.yml
+# TODO: verify this is an HA environment
+# TODO: fork for pacemaker vs haproxy (based on?)
+
+- name: Validate configuration for rolling restart
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      openshift_rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+  - fail:
+      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+
+# Creating a temp file on localhost, we then check each system that will
+# be rebooted to see if that file exists, if so we know we're running
+# ansible on a machine that needs a reboot, and we need to error out.
+- name: Create temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - local_action: command mktemp
+    register: mktemp
+    changed_when: False
+
+- name: Check if temp file exists on any masters
+  hosts: oo_masters_to_config
+  tasks:
+  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+    register: exists
+
+- name: Cleanup temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+
+- name: Fail if restarting the system where ansible is running
+  hosts: oo_masters_to_config
+  any_errors_fatal: true
+  tasks:
+  - fail: msg="Cannot run playbook on a host that will be restarted."
+    when: exists.stat.exists
+
+- name: Restart Masters
+  hosts: oo_masters_to_config
+  serial: 1
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Restart master system
+    # https://github.com/ansible/ansible/issues/10616
+    shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+    async: 1
+    poll: 0
+    ignore_errors: true
+    become: yes
+    when: openshift_rolling_restart_mode == 'system'
+  - name: Restart master services
+    service:
+      name: "{{ openshift.common.service_type }}-master-api"
+      state: restarted
+    # NOTE: no need to check openshift_master_ha here, we know it must be,
+    # thus the api service is the one we restart.
+    when: openshift_rolling_restart_mode == 'services'
+
+  - name: Wait for master API to come back online
+    become: no
+    local_action:
+      module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port=8443 # TODO: should this be made a master host variable?
--
cgit v1.2.3
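The mktemp dance above answers one question: is the control host among the masters about to be rebooted? A file created locally exists on a target only when the target is the control host. The same idea as a hypothetical helper (path_exists_on stands in for the stat task run over SSH):

```python
# Hypothetical restatement of the mktemp trick; not repo code.
import os
import tempfile

def targets_that_are_control_host(targets, path_exists_on):
    fd, marker = tempfile.mkstemp()   # unique path on the control host
    os.close(fd)
    try:
        return [host for host in targets if path_exists_on(host, marker)]
    finally:
        os.remove(marker)             # mirrors the cleanup play
```
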
From f9aaa8ac13adf841823f35be594641bdc2ebecac Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Tue, 5 Jan 2016 11:39:22 -0500
Subject: Update rolling restart playbook for pacemaker support.

Replace fail with a warn and prompt if running ansible from a host that
will be rebooted. Re-organize playbooks.
---
 playbooks/byo/openshift-cluster/restart.yml        |   7 --
 playbooks/byo/openshift-master/filter_plugins      |   1 +
 playbooks/byo/openshift-master/lookup_plugins      |   1 +
 playbooks/byo/openshift-master/restart.yml         |   4 +
 playbooks/byo/openshift-master/roles               |   1 +
 playbooks/common/openshift-cluster/restart.yml     |  78 ------------
 playbooks/common/openshift-master/restart.yml      | 128 +++++++++++++++++++++
 .../common/openshift-master/restart_hosts.yml      |  28 +++++
 .../openshift-master/restart_hosts_pacemaker.yml   |  25 ++++
 .../common/openshift-master/restart_services.yml   |  27 +++++
 .../restart_services_pacemaker.yml                 |  10 ++
 11 files changed, 225 insertions(+), 85 deletions(-)
 delete mode 100644 playbooks/byo/openshift-cluster/restart.yml
 create mode 120000 playbooks/byo/openshift-master/filter_plugins
 create mode 120000 playbooks/byo/openshift-master/lookup_plugins
 create mode 100644 playbooks/byo/openshift-master/restart.yml
 create mode 120000 playbooks/byo/openshift-master/roles
 delete mode 100644 playbooks/common/openshift-cluster/restart.yml
 create mode 100644 playbooks/common/openshift-master/restart.yml
 create mode 100644 playbooks/common/openshift-master/restart_hosts.yml
 create mode 100644 playbooks/common/openshift-master/restart_hosts_pacemaker.yml
 create mode 100644 playbooks/common/openshift-master/restart_services.yml
 create mode 100644 playbooks/common/openshift-master/restart_services_pacemaker.yml

diff --git a/playbooks/byo/openshift-cluster/restart.yml b/playbooks/byo/openshift-cluster/restart.yml
deleted file mode 100644
index da0da69a6..000000000
--- a/playbooks/byo/openshift-cluster/restart.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- include: ../../common/openshift-cluster/restart.yml
-  vars:
-    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
-    g_master_hosts: "{{ groups.masters | default([]) }}"
-    g_node_hosts: "{{ groups.nodes | default([]) }}"
-    g_lb_hosts: "{{ groups.lb | default([]) }}"
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-master/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
new file mode 100644
index 000000000..a78a6aa3d
--- /dev/null
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- include: ../../common/openshift-master/restart.yml
+  vars_files:
+  - ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/restart.yml b/playbooks/common/openshift-cluster/restart.yml
deleted file mode 100644
index 4117f7297..000000000
--- a/playbooks/common/openshift-cluster/restart.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- include: evaluate_groups.yml
-# TODO: verify this is an HA environment
-# TODO: fork for pacemaker vs haproxy (based on?)
-
-- name: Validate configuration for rolling restart
-  hosts: oo_masters_to_config
-  tasks:
-  - set_fact:
-      openshift_rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-  - fail:
-      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
-    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
-
-# Creating a temp file on localhost, we then check each system that will
-# be rebooted to see if that file exists, if so we know we're running
-# ansible on a machine that needs a reboot, and we need to error out.
-- name: Create temp file on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - local_action: command mktemp
-    register: mktemp
-    changed_when: False
-
-- name: Check if temp file exists on any masters
-  hosts: oo_masters_to_config
-  tasks:
-  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
-    register: exists
-
-- name: Cleanup temp file on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
-
-- name: Fail if restarting the system where ansible is running
-  hosts: oo_masters_to_config
-  any_errors_fatal: true
-  tasks:
-  - fail: msg="Cannot run playbook on a host that will be restarted."
-    when: exists.stat.exists
-
-- name: Restart Masters
-  hosts: oo_masters_to_config
-  serial: 1
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Restart master system
-    # https://github.com/ansible/ansible/issues/10616
-    shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
-    async: 1
-    poll: 0
-    ignore_errors: true
-    become: yes
-    when: openshift_rolling_restart_mode == 'system'
-  - name: Restart master services
-    service:
-      name: "{{ openshift.common.service_type }}-master-api"
-      state: restarted
-    # NOTE: no need to check openshift_master_ha here, we know it must be,
-    # thus the api service is the one we restart.
-    when: openshift_rolling_restart_mode == 'services'
-
-  - name: Wait for master API to come back online
-    become: no
-    local_action:
-      module: wait_for
-      host="{{ inventory_hostname }}"
-      state=started
-      delay=10
-      port=8443 # TODO: should this be made a master host variable?
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
new file mode 100644
index 000000000..7603f0d61
--- /dev/null
+++ b/playbooks/common/openshift-master/restart.yml
@@ -0,0 +1,128 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - fail:
+      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+    - role: master
+      local_facts:
+        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Creating a temp file on localhost, we then check each system that will
+# be rebooted to see if that file exists, if so we know we're running
+# ansible on a machine that needs a reboot, and we need to error out.
+- name: Create temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - local_action: command mktemp
+    register: mktemp
+    changed_when: false
+
+- name: Check if temp file exists on any masters
+  hosts: oo_masters_to_config
+  tasks:
+  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+    register: exists
+    changed_when: false
+
+- name: Cleanup temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+    changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+  hosts: oo_masters_to_config
+  tasks:
+  - pause:
+      prompt: >
+        Warning: Running playbook from a host that will be restarted!
+        Press CTRL+C and A to abort playbook execution. You may
+        continue by pressing ENTER but the playbook will stop
+        executing once this system restarts and services must be
+        manually verified.
+    when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+  - set_fact:
+      current_host: "{{ exists.stat.exists }}"
+    when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Check master service status
+    command: >
+      systemctl is-active {{ openshift.common.service_type }}-master
+    register: active_check_output
+    when: openshift.master.cluster_method == 'pacemaker'
+    failed_when: active_check_output.stdout not in ['active', 'inactive']
+  - set_fact:
+      is_active: "{{ active_check_output.stdout == 'active' }}"
+    when: openshift.master.cluster_method == 'pacemaker'
+
+- name: Evaluate master groups
+  hosts: localhost
+  become: no
+  tasks:
+  - name: Evaluate oo_active_masters
+    add_host:
+      name: "{{ item }}"
+      groups: oo_active_masters
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+    when: (hostvars[item]['is_active'] | default(false)) | bool
+  - name: Evaluate oo_current_masters
+    add_host:
+      name: "{{ item }}"
+      groups: oo_current_masters
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+    when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Restart masters
+  hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  serial: 1
+  tasks:
+  - include: restart_hosts.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services.yml
+    when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+  hosts: oo_active_masters
+  serial: 1
+  tasks:
+  - include: restart_hosts_pacemaker.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services_pacemaker.yml
+    when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+  hosts: oo_current_masters
+  serial: 1
+  tasks:
+  - include: restart_hosts.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services.yml
+    when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
new file mode 100644
index 000000000..598e1ad63
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,28 @@
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+  when: openshift.master.cluster_method != 'pacemaker'
+# When cluster_method is pacemaker we can only ensure that the host
+# restarted successfully.
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+  when: openshift.master.cluster_method == 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+  command: >
+    pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ openshift.master.cluster_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+  service:
+    name: "{{ openshift.common.service_type }}-master"
+    state: restarted
+  when: not openshift_master_ha | bool
+- name: Restart master API
+  service:
+    name: "{{ openshift.common.service_type }}-master-api"
+    state: restarted
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+  service:
+    name: "{{ openshift.common.service_type }}-master-controllers"
+    state: restarted
+  # Ignore errrors since it is possible that type != simple for
+  # pre-3.1.1 installations.
+  ignore_errors: true
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+  command: pcs resource restart master
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ openshift.master.cluster_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
--
cgit v1.2.3
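The core of the rework is the partitioning that the new restart.yml builds with add_host: pacemaker-inactive masters restart first, then the active master (after a pcs failover), and last the host ansible itself runs from. A sketch of that ordering with simplified data shapes (not repo code):

```python
# Sketch of the restart ordering derived from is_active / current_host
# facts; hostvars maps master name -> fact dict, as in the playbook.
def restart_order(hostvars, masters):
    active  = [m for m in masters if hostvars[m].get('is_active')]
    current = [m for m in masters if hostvars[m].get('current_host')]
    rest    = [m for m in masters
               if m not in active and m not in current]
    return rest + active + current
```
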
From 95223c4b92b9c45bda2b00989bc51461d8073c5d Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Mon, 11 Jan 2016 16:28:53 -0500
Subject: Switch to using hostnamectl as it works on atomic and rhel7

---
 roles/openshift_common/tasks/main.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 0ee873a2b..3a2ccb59a 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -38,5 +38,6 @@
     set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
 
 - name: Set hostname
-  hostname: name={{ openshift.common.hostname }}
+  command: >
+    hostnamectl set-hostname {{ openshift.common.hostname }}
   when: openshift_set_hostname | default(set_hostname_default) | bool
--
cgit v1.2.3

From 4725d3509e1315ad671a77d355c7a5328578390b Mon Sep 17 00:00:00 2001
From: Lutz Lange
Date: Tue, 12 Jan 2016 09:09:31 +0000
Subject: - squashed to one commit

---
 filter_plugins/oo_filters.py                       | 15 ++++++++++++++-
 roles/openshift_master/templates/master.yaml.v1.j2 |  4 ++--
 roles/openshift_node/templates/node.yaml.v1.j2     |  4 +---
 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 326c36f6c..c262693ec 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -12,6 +12,8 @@ import os
 import pdb
 import re
 import json
+import yaml
+from ansible.utils.unicode import to_unicode
 
 class FilterModule(object):
     ''' Custom ansible filters '''
@@ -474,6 +476,16 @@ class FilterModule(object):
         secret = os.urandom(num_bytes)
         return secret.encode('base-64').strip()
 
+    @staticmethod
+    def to_padded_yaml(data, level=0, indent=2, **kw):
+        ''' returns a yaml snippet padded to match the indent level you specify '''
+        try:
+            transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+            padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+            return to_unicode("\n{0}".format(padded))
+        except Exception as my_e:
+            raise errors.AnsibleFilterError('Failed to convert: %s', my_e)
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -493,5 +505,6 @@ class FilterModule(object):
             "oo_parse_named_certificates": self.oo_parse_named_certificates,
             "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
             "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
-            "oo_generate_secret": self.oo_generate_secret
+            "oo_generate_secret": self.oo_generate_secret,
+            "to_padded_yaml": self.to_padded_yaml,
         }
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 647476b7f..dfcaf1953 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -87,8 +87,8 @@ kubernetesMasterConfig:
   - v1beta3
   - v1
 {% endif %}
-  apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_json }}
-  controllerArguments: {{ openshift.master.controller_args | default(None) | to_json }}
+  apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+  controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
   masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: ""
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 23bd81f91..cbe811f83 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -11,9 +11,7 @@ imageConfig:
   format: {{ openshift.node.registry_url }}
   latest: false
 kind: NodeConfig
-{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
-kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
-{% endif %}
+kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
 masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift.common.use_openshift_sdn %}
 networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
--
cgit v1.2.3

From 0d6ec0d9547ac95980d4444aa70216312455e837 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Tue, 12 Jan 2016 11:42:09 -0500
Subject: Fix for to_padded_yaml filter

- Fix issue where None is passed to to_padded_yaml filter and invalid
  config is generated.
---
 filter_plugins/oo_filters.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c262693ec..671c237b9 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -479,6 +479,9 @@ class FilterModule(object):
     @staticmethod
     def to_padded_yaml(data, level=0, indent=2, **kw):
         ''' returns a yaml snippet padded to match the indent level you specify '''
+        if data in [None, ""]:
+            return ""
+
         try:
             transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
             padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
--
cgit v1.2.3
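What the filter produces, shown standalone with PyYAML only (the to_unicode call and the error handling from the filter are omitted; level=2 matches the master template's kubernetesMasterConfig indentation):

```python
# Standalone demo of the to_padded_yaml behaviour, including the None
# guard added by the follow-up fix above. Sample data is made up.
import yaml

def to_padded_yaml(data, level=0, indent=2):
    if data in [None, ""]:
        return ""
    dumped = yaml.safe_dump(data, indent=indent, default_flow_style=False)
    return "\n" + "\n".join(" " * level * indent + line
                            for line in dumped.splitlines())

args = {'max-requests-inflight': ['400']}
print("  apiServerArguments:" + to_padded_yaml(args, level=2))
#   apiServerArguments:
#     max-requests-inflight:
#     - '400'
print(repr(to_padded_yaml(None)))  # '' -- no invalid "null" in the config
```

Unlike the to_json approach it replaces, the padded-YAML form nests naturally inside the surrounding config file instead of embedding a JSON blob.
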
From 8774f4995654715629be47a8cd5814bdb1962ec9 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Mon, 11 Jan 2016 11:24:27 -0500
Subject: Add wait for API before starting controllers w/ native ha install.

---
 roles/openshift_master/tasks/main.yml | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3b46a0df4..fbbcd2a8f 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -287,6 +287,22 @@
       master_api_service_status_changed: "{{ start_result | changed }}"
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
+- name: Wait for API to become available
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift.master.api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false
+
+- fail:
+    msg: >
+      Unable to contact master API at {{ openshift.master.api_url }}
+  when: api_available_output.stdout.find("200 OK") == -1
+
 - name: Start and enable master controller
   service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
--
cgit v1.2.3
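The same poll-until-ready loop in plain Python, for reference (illustrative only; the playbook shells out to curl instead). Disabling certificate verification mirrors curl -k, since masters serve self-signed certificates during install:

```python
# Hypothetical equivalent of the curl/until/retries task above.
import ssl
import time
import urllib.request

def wait_for_api(url, retries=120, delay=1):
    ctx = ssl.create_default_context()
    ctx.check_hostname = False          # curl -k equivalent
    ctx.verify_mode = ssl.CERT_NONE
    for _ in range(retries):
        try:
            if urllib.request.urlopen(url, context=ctx).getcode() == 200:
                return True
        except OSError:                 # connection refused, not up yet
            pass
        time.sleep(delay)
    raise RuntimeError('Unable to contact master API at %s' % url)
```
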
AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True)], + ('10.0.0.1', True, False)], ssh_user='root', variant_num=2, confirm_facts='y') diff --git a/utils/test/fixture.py b/utils/test/fixture.py index 90bd9e1ef..be759578a 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -138,7 +138,7 @@ class OOCliFixture(OOInstallFixture): self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) -#pylint: disable=too-many-arguments,too-many-branches +#pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, master_lb=None): @@ -163,13 +163,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, num_masters = 0 if hosts: i = 0 - for (host, is_master) in hosts: + for (host, is_master, is_containerized) in hosts: inputs.append(host) if is_master: inputs.append('y') num_masters += 1 else: inputs.append('n') + + if is_containerized: + inputs.append('container') + else: + inputs.append('rpm') + #inputs.append('rpm') # We should not be prompted to add more hosts if we're currently at # 2 masters, this is an invalid HA configuration, so this question @@ -196,8 +202,12 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, inputs.append('y') inputs.append('1') # Add more nodes i = 0 - for (host, is_master) in add_nodes: + for (host, is_master, is_containerized) in add_nodes: inputs.append(host) + if is_containerized: + inputs.append('container') + else: + inputs.append('rpm') #inputs.append('rpm') if i < len(add_nodes) - 1: inputs.append('y') # Add more hosts -- cgit v1.2.3 From 4b636261736c93f01de1ba81af531ab83825ce6f Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Tue, 12 Jan 2016 16:38:48 -0500 Subject: Removed atomic host check --- roles/os_zabbix/tasks/main.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index a8b65dd56..7552086d4 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -1,8 +1,4 @@ --- -- fail: - msg: "Zabbix config is not yet supported on atomic hosts" - when: openshift.common.is_containerized | bool - - name: Main List all templates zbx_template: zbx_server: "{{ ozb_server }}" -- cgit v1.2.3 From 97be5890e2a34036a22d2d1e2586c83009ae6064 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 12 Jan 2016 16:39:06 -0500 Subject: Validate pacemaker cluster members. --- filter_plugins/openshift_master.py | 28 +++++++++++++++++++++- playbooks/common/openshift-master/restart.yml | 13 ++++++++++ .../common/openshift-master/restart_hosts.yml | 15 ++++++++++-- roles/openshift_facts/tasks/main.yml | 1 + 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index 8d7c62ad1..7ababc51a 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py @@ -463,6 +463,32 @@ class FilterModule(object): IdentityProviderBase.validate_idp_list(idp_list) return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False) + @staticmethod + def validate_pcs_cluster(data, masters=None): + ''' Validates output from "pcs status", ensuring that each master + provided is online. 
+ Ex: data = ('...', + 'PCSD Status:', + 'master1.example.com: Online', + 'master2.example.com: Online', + 'master3.example.com: Online', + '...') + masters = ['master1.example.com', + 'master2.example.com', + 'master3.example.com'] + returns True + ''' + if not issubclass(type(data), str): + raise errors.AnsibleFilterError("|failed expects data is a string") + if not issubclass(type(masters), list): + raise errors.AnsibleFilterError("|failed expects masters is a list") + valid = True + for master in masters: + if "{0}: Online".format(master) not in data: + valid = False + return valid + def filters(self): ''' returns a mapping of filters to methods ''' - return {"translate_idps": self.translate_idps} + return {"translate_idps": self.translate_idps, + "validate_pcs_cluster": self.validate_pcs_cluster} diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 7603f0d61..fa13a64cb 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -73,6 +73,7 @@ register: active_check_output when: openshift.master.cluster_method == 'pacemaker' failed_when: active_check_output.stdout not in ['active', 'inactive'] + changed_when: false - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" when: openshift.master.cluster_method == 'pacemaker' @@ -98,6 +99,18 @@ with_items: "{{ groups.oo_masters_to_config | default([]) }}" when: (hostvars[item]['current_host'] | default(false)) | bool +- name: Validate pacemaker cluster + hosts: oo_active_masters + tasks: + - name: Retrieve pcs status + command: pcs status + register: pcs_status_output + changed_when: false + - fail: + msg: > + Pacemaker cluster validation failed. One or more nodes are not online. + when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool + - name: Restart masters hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters vars: diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index 598e1ad63..ff206f5a2 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -16,8 +16,6 @@ delay=10 port="{{ openshift.master.api_port }}" when: openshift.master.cluster_method != 'pacemaker' -# When cluster_method is pacemaker we can only ensure that the host -# restarted successfully. - name: Wait for master to start become: no local_action: @@ -25,4 +23,17 @@ host="{{ inventory_hostname }}" state=started delay=10 + port=22 when: openshift.master.cluster_method == 'pacemaker' +- name: Wait for master to become available + command: pcs status + register: pcs_status_output + until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool + retries: 15 + delay: 2 + changed_when: false + when: openshift.master.cluster_method == 'pacemaker' +- fail: + msg: > + Pacemaker cluster validation failed: {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 87fa99a3b..e40a1b329 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -10,6 +10,7 @@ shell: ls /run/ostree-booted ignore_errors: yes failed_when: false + changed_when: false register: ostree_output # Locally setup containerized facts for now -- cgit v1.2.3 From c5f6db5ddd431f969aa3e2216cc7e880c8405d7b Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Tue, 12 Jan 2016 16:50:43 -0500 Subject: Add -A parameter to forward ssh agent --- bin/ossh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bin/ossh b/bin/ossh index 6519e4e08..5e2506638 100755 --- a/bin/ossh +++ b/bin/ossh @@ -72,6 +72,8 @@ class Ossh(object): parser.add_argument('-o', '--ssh_opts', action='store', help='options to pass to SSH.\n \ "-oForwardX11=yes,TCPKeepAlive=yes"') + parser.add_argument('-A', default=False, action="store_true", + help='Forward authentication agent') parser.add_argument('host', nargs='?', default='') self.args = parser.parse_args() @@ -177,6 +179,9 @@ class Ossh(object): if self.user: ssh_args.append('-l%s' % self.user) + if self.args.A: + ssh_args.append('-A') + if self.args.verbose: ssh_args.append('-vvv') -- cgit v1.2.3 From 25e213f79ba5e25bf51d584971064e26d3537b49 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 7 Jan 2016 21:59:46 -0500 Subject: Add a Verify API Server handler that waits for the API server to become available --- playbooks/common/openshift-node/config.yml | 11 ++++++++++- roles/openshift_master/handlers/main.yml | 9 +++++++++ roles/openshift_master/tasks/main.yml | 2 ++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 483a7768c..fbaf64300 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -215,6 +215,15 @@ | oo_collect('openshift.common.hostname') }}" openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - + # Necessary because when you're on a node that's also a master the master will be + # restarted after the node restarts docker and it will take up to 60 seconds for + # systemd to start the master again + - name: Wait for master to become available before proceeding + wait_for: + host: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" + state: started + timeout: 180 + when: openshift.common.is_containerized | bool roles: - openshift_manage_node diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index e1b95eda4..523ba8ee4 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -2,11 +2,20 @@ - name: restart master service: name={{ openshift.common.service_type }}-master state=restarted when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool)) + notify: Verify API Server - name: restart master api service: name={{ openshift.common.service_type }}-master-api state=restarted when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server - name: restart 
master controllers service: name={{ openshift.common.service_type }}-master-controllers state=restarted when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + +- name: Verify API Server + wait_for: + host: "{{ openshift.common.ip }}" + port: "{{ openshift.master.api_port }}" + state: started + timeout: 180 diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index fbbcd2a8f..b43e9177e 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -269,6 +269,7 @@ service: name={{ openshift.common.service_type }}-master enabled=yes state=started when: not openshift_master_ha | bool register: start_result + notify: Verify API Server - name: Stop and disable non HA master when running HA service: name={{ openshift.common.service_type }}-master enabled=no state=stopped @@ -282,6 +283,7 @@ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' register: start_result + notify: Verify API Server - set_fact: master_api_service_status_changed: "{{ start_result | changed }}" -- cgit v1.2.3 From 609469eb8d25baeee30cda96377c9b3fda6e499d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 12 Jan 2016 17:18:18 -0500 Subject: Update api verification. --- playbooks/common/openshift-node/config.yml | 20 ++++++++++++++------ roles/openshift_master/handlers/main.yml | 14 +++++++++----- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index fbaf64300..336cbed5e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -218,12 +218,20 @@ # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again - - name: Wait for master to become available before proceeding - wait_for: - host: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" - state: started - timeout: 180 + - name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false when: openshift.common.is_containerized | bool + - fail: + msg: > + Unable to contact master API at {{ openshift.master.api_url }} + when: openshift.common.is_containerized | bool and api_available_output.stdout.find("200 OK") == -1 roles: - openshift_manage_node diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 523ba8ee4..6b9992eea 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -14,8 +14,12 @@ when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - name: Verify API Server - wait_for: - host: "{{ openshift.common.ip }}" - port: "{{ openshift.master.api_port }}" - state: started - timeout: 180 + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false -- cgit v1.2.3 From 59dbd6d3dbd3d036119f4aee2203cf191ed68cee Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Tue, 12 Jan 2016 14:27:43 -0500 Subject: atomic-openshift-installer: Error handling for unicode hostnames Fix error handling for invalid hostnames. Previously we tried to print the offending hostname, which caused errors due to Python's handling of unicode strings. Since the user's input stays on the screen directly above the error, there is no need to print it, and we can avoid this problem entirely. --- utils/src/ooinstall/cli_installer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 2b6c9deee..4e30929da 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -33,9 +33,7 @@ def is_valid_hostname(hostname): def validate_prompt_hostname(hostname): if '' == hostname or is_valid_hostname(hostname): return hostname - raise click.BadParameter('"{}" appears to be an invalid hostname. ' \ - 'Please double-check this value i' \ - 'and re-enter it.'.format(hostname)) + raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.') def get_ansible_ssh_user(): click.clear() -- cgit v1.2.3 From 2e3e0ebe0d98f5374fbfb3a95145a9665d57fe69 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Wed, 13 Jan 2016 10:16:43 -0500 Subject: Add wait in between api and controllers start for native ha.
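In rough outline, the curl-based check shared by these tasks and the Verify API Server handler amounts to the following Python sketch. This is illustrative only: the playbooks shell out to curl, and the function name and the use of Python 3's urllib here are assumptions, not project code.

    import ssl
    import time
    import urllib.request

    def wait_for_api(api_url, retries=120, delay=1):
        # Equivalent of curl -k: skip TLS verification, since the master
        # certificate is typically self-signed at this point.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        for _ in range(retries):
            try:
                req = urllib.request.Request(api_url, method='HEAD')
                if urllib.request.urlopen(req, context=ctx).getcode() == 200:
                    return True  # the task's check for "200 OK" in stdout
            except OSError:
                pass  # connection refused/reset: API not up yet
            time.sleep(delay)
        return False  # exhausting retries fails the play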
--- playbooks/common/openshift-master/config.yml | 1 + roles/openshift_master/tasks/main.yml | 9 +++------ 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 4ecdf2a0c..0df03f194 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -313,6 +313,7 @@ - name: Configure master instances hosts: oo_masters_to_config + any_errors_fatal: true serial: 1 vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index b43e9177e..150b76fc8 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -283,12 +283,13 @@ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' register: start_result - notify: Verify API Server - set_fact: master_api_service_status_changed: "{{ start_result | changed }}" when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +# A separate wait is required here for native HA since notifies will +# be resolved after all tasks in the role. - name: Wait for API to become available # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. @@ -299,11 +300,7 @@ retries: 120 delay: 1 changed_when: false - -- fail: - msg: > - Unable to contact master API at {{ openshift.master.api_url }} - when: api_available_output.stdout.find("200 OK") == -1 + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool - name: Start and enable master controller service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started -- cgit v1.2.3 From a391fac26da6889fd314ab2ce21ff33629422fe9 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 13 Jan 2016 12:13:51 -0500 Subject: Updated help menu for v3 flag --- bin/opssh | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/opssh b/bin/opssh index 3747bc993..7a2ffdb1d 100755 --- a/bin/opssh +++ b/bin/opssh @@ -16,6 +16,7 @@ Options: -c CLUSTER, --cluster CLUSTER which cluster to use -e ENV, --env ENV which environment to use + --v3 When working with v3 environments. 
v2 by default -t HOST_TYPE, --host-type HOST_TYPE which host type to use --list-host-types list all of the host types -- cgit v1.2.3 From 4d25e4d0b375e953e4125f7247283ef8235d67c2 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Mon, 11 Jan 2016 11:41:53 -0500 Subject: 3.1.1 upgrade playbook --- .../upgrades/v3_1_minor/README.md | 17 +++ .../upgrades/v3_1_minor/upgrade.yml | 14 +++ .../upgrades/v3_1_minor/filter_plugins | 1 + .../openshift-cluster/upgrades/v3_1_minor/library | 1 + .../upgrades/v3_1_minor/lookup_plugins | 1 + .../openshift-cluster/upgrades/v3_1_minor/post.yml | 50 ++++++++ .../openshift-cluster/upgrades/v3_1_minor/pre.yml | 87 +++++++++++++ .../openshift-cluster/upgrades/v3_1_minor/roles | 1 + .../upgrades/v3_1_minor/upgrade.yml | 137 +++++++++++++++++++++ 9 files changed, 309 insertions(+) create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/library create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md new file mode 100644 index 000000000..b230835c3 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md @@ -0,0 +1,17 @@ +# v3.1 minor upgrade playbook +This upgrade will preserve all locally made configuration modifications to the +Masters and Nodes. + +## Overview +This playbook is available as a technical preview. It currently performs the +following steps. 
+ + * Upgrades and restarts master services + * Upgrades and restarts node services + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml new file mode 100644 index 000000000..20fa9b10f --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -0,0 +1,14 @@ +--- +- include: ../../../../common/openshift-cluster/evaluate_groups.yml + vars: + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_nfs_hosts: "{{ groups.nfs | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +- include: ../../../openshift-master/restart.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library @@ -0,0 +1 @@ +../library \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml new file mode 100644 index 000000000..d8336fcae --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -0,0 +1,50 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base
}}/master/admin.kubeconfig" + roles: + # Create the new templates shipped in 3.1.z, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + openshift_examples_import_command: replace + pre_tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + --api-version=v1 + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml new file mode 100644 index 000000000..91780de09 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -0,0 +1,87 @@ +--- +############################################################################### +# Evaluate host groups and gather facts +############################################################################### +- name: Load openshift_facts + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_facts + +############################################################################### +# Pre-upgrade checks +############################################################################### +- name: Verify upgrade can proceed + hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + gather_facts: no + tasks: + - fail: + msg: > + This upgrade is only supported for origin, openshift-enterprise, and online + deployment types + when: deployment_type not in ['origin','openshift-enterprise', 'online'] + + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a {{ target_version }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') + +- name: Verify upgrade can proceed + hosts: oo_masters_to_config:oo_nodes_to_config + vars: + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + tasks: + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" + + - set_fact: + g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" + + - name: Determine available versions + script: ../files/versions.sh {{ g_new_service_name }} openshift + register: g_versions_result + + - set_fact: + g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}" + + - set_fact: + g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if 
g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + + - fail: + msg: This playbook requires Origin 1.1 or later + when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<') + + - fail: + msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later + when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<') + + - fail: + msg: Upgrade packages not found + when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) + + - set_fact: + pre_upgrade_complete: True + + +############################################################################## +# Gate on pre-upgrade checks +############################################################################## +- name: Gate on pre-upgrade checks + hosts: localhost + connection: local + become: no + vars: + pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" + tasks: + - set_fact: + pre_upgrade_completed: "{{ hostvars + | oo_select_keys(pre_upgrade_hosts) + | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}" + - set_fact: + pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}" + when: pre_upgrade_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles @@ -0,0 +1 @@ +../../../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml new file mode 100644 index 000000000..81dbba1e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -0,0 +1,137 @@ +--- +############################################################################### +# The restart playbook should be run after this playbook completes. 
+############################################################################### + +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master packages and configuration + hosts: oo_masters_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + + - name: Ensure python-yaml present for config upgrade + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool + +# Currently 3.1.1 does not have any new configuration settings +# +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.0' +# to_version: '3.1' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Upgrade Nodes +############################################################################### +- name: Upgrade nodes + hosts: oo_nodes_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + roles: + - openshift_facts + tasks: + - name: Upgrade node packages + command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + + - name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted + + - set_fact: + node_update_complete: True + +############################################################################## +# Gate on nodes update +############################################################################## +- name: Gate on nodes update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + node_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_nodes_to_config) + | oo_collect('inventory_hostname', {'node_update_complete': true}) }}" + - set_fact: + node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following nodes did not finish updating: {{ node_update_failed | join(',') }}" + when: node_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles and Cluster Role Bindings +############################################################################### +- name: Reconcile Cluster Roles and Cluster Role Bindings + hosts: oo_masters_to_config + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + tasks: + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 -- cgit v1.2.3 From d936e6e7ad0a6d64e53b66bbc46f7edc24fa6e0d Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Wed, 13 Jan 2016 13:58:08 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.29-1]. --- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 271e86d20..7cb6daa25 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.28-1 ./ +3.0.29-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 70938e8d2..285bda366 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.28 +Version: 3.0.29 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,37 @@ Atomic OpenShift Utilities includes %changelog +* Wed Jan 13 2016 Brenton Leanhardt 3.0.29-1 +- 3.1.1 upgrade playbook (bleanhar@redhat.com) +- Updated help menu for v3 flag (kwoodson@redhat.com) +- Add wait in between api and controllers start for native ha. + (abutcher@redhat.com) +- atomic-openshift-installer: Error handling for unicode hostnames + (smunilla@redhat.com) +- Update api verification. 
(abutcher@redhat.com) +- Add a Verify API Server handler that waits for the API server to become + available (sdodson@redhat.com) +- Add -A parameter to forward ssh agent (jdiaz@redhat.com) +- Validate pacemaker cluster members. (abutcher@redhat.com) +- Removed atomic host check (kwoodson@redhat.com) +- Add is_containerized inputs to nosetests. (abutcher@redhat.com) +- Add wait for API before starting controllers w/ native ha install. + (abutcher@redhat.com) +- Fix for to_padded_yaml filter (jdetiber@redhat.com) +- Squashed to one commit (llange@redhat.com) +- Switch to using hostnamectl as it works on atomic and rhel7 + (sdodson@redhat.com) +- Update rolling restart playbook for pacemaker support. Replace fail with a + warn and prompt if running ansible from a host that will be rebooted. Re- + organize playbooks. (abutcher@redhat.com) +- Implement simple master rolling restarts. (dgoodwin@redhat.com) +- re-enable containerize installs (sdodson@redhat.com) +- Set portal net in master playbook (jdetiber@redhat.com) +- Set the cli image to match osm_image in openshift_cli role + (sdodson@redhat.com) +- atomic-openshift-installer: Populate new_nodes group (smunilla@redhat.com) +- Always pull docker images (sdodson@redhat.com) + * Mon Jan 11 2016 Kenny Woodson 3.0.28-1 - added the rhe7-host-monitoring service file (mwoodson@redhat.com) - Fixing tab completion for latest metadata changes (kwoodson@redhat.com) -- cgit v1.2.3 From e9bd1b8d47f18eba817f426ae66cb75f32fb40c5 Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Wed, 13 Jan 2016 15:08:07 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.30-1]. --- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 7cb6daa25..5d4f58c88 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.29-1 ./ +3.0.30-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 285bda366..59c780a31 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.29 +Version: 3.0.30 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,9 @@ Atomic OpenShift Utilities includes %changelog +* Wed Jan 13 2016 Joel Diaz 3.0.30-1 +- Add -A and detail --v3 flags + * Wed Jan 13 2016 Brenton Leanhardt 3.0.29-1 - 3.1.1 upgrade playbook (bleanhar@redhat.com) - Updated help menu for v3 flag (kwoodson@redhat.com) -- cgit v1.2.3 From e69887a19096da1586bbdb157b7f4bb037b81e45 Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Thu, 14 Jan 2016 10:44:26 -0500 Subject: added anchors --- docs/best_practices_guide.adoc | 67 ++++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 22 deletions(-) diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 6b744333c..267aa850d 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -13,9 +13,12 @@ This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. == Pull Requests + + +[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]] [cols="2v,v"] |=== -| **Rule** +| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>> | All pull requests MUST pass the build bot *before* they are merged.
|=== @@ -30,9 +33,10 @@ The tooling is flexible enough that exceptions can be made so that the tool the === Python Source Files ''' +[[Python-source-files-MUST-contain-the-following-vim-mode-line]] [cols="2v,v"] |=== -| **Rule** +| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>> | Python source files MUST contain the following vim mode line. |=== @@ -48,9 +52,10 @@ If mode lines for other editors are needed, please open a GitHub issue. === Method Signatures ''' +[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]] [cols="2v,v"] |=== -| **Rule** +| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>> | When adding a new parameter to an existing method, a default value SHOULD be used |=== The purpose of this rule is to make it so that method signatures are backwards compatible. @@ -74,18 +79,20 @@ def add_person(first_name, last_name, age=None): http://www.pylint.org/[PyLint] is used in an attempt to keep the Python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. ''' +[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]] [cols="2v,v"] |=== -| **Rule** +| <<PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file, Rule>> | PyLint rules MUST NOT be disabled on a whole file. |=== Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining]. ''' +[[PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions]] [cols="2v,v"] |=== -| **Rule** +| <<PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions, Rule>> | PyLint rules MUST NOT be disabled unless they meet one of the following exceptions |=== @@ -95,9 +102,10 @@ Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-par 1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable. ''' +[[All-PyLint-rule-disables-MUST-be-documented-in-the-code]] [cols="2v,v"] |=== -| **Rule** +| <<All-PyLint-rule-disables-MUST-be-documented-in-the-code, Rule>> | All PyLint rule disables MUST be documented in the code. |=== @@ -124,9 +132,10 @@ metadata[line] = results.pop() === Yaml Files (Playbooks, Roles, Vars, etc) ''' +[[Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead, Rule>> | Ansible files SHOULD NOT use JSON (use pure YAML instead). |=== @@ -144,9 +153,10 @@ Every effort should be made to keep our Ansible YAML files in pure YAML. === Modules ''' +[[Custom-Ansible-modules-SHOULD-be-embedded-in-a-role]] [cols="2v,v"] |=== -| **Rule** +| <<Custom-Ansible-modules-SHOULD-be-embedded-in-a-role, Rule>> | Custom Ansible modules SHOULD be embedded in a role.
|=== @@ -177,9 +187,10 @@ The purpose of this rule is to make it easy to include custom modules in our pla ''' +[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed]] [cols="2v,v"] |=== -| **Rule** +| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed, Rule>> | Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed |=== @@ -204,9 +215,10 @@ When a module has several parameters that are being passed in, it's hard to see ''' +[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters]] [cols="2v,v"] |=== -| **Rule** +| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters, Rule>> | Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters |=== @@ -228,9 +240,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It ---- ''' +[[The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module, Rule>> | The Ansible `command` module SHOULD be used instead of the Ansible `shell` module. |=== .Context @@ -251,9 +264,10 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI ---- ''' +[[The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module, Rule>> | The Ansible `quote` filter MUST be used with any variable passed into the shell module. |=== .Context @@ -279,9 +293,10 @@ It is recommended not to use the `shell` module. However, if it absolutely must * http://docs.ansible.com/fail_module.html[Ansible Fail Module] ''' +[[Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>> | Ansible playbooks MUST begin with checks for any variables that they require. |=== @@ -299,9 +314,10 @@ If an Ansible playbook requires certain variables to be set, it's best to check ---- ''' +[[Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>> | Ansible roles tasks/main.yml file MUST begin with checks for any variables that they require. |=== @@ -318,9 +334,10 @@ If an Ansible role requires certain variables to be set, it's best to check for === Tasks ''' +[[Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>> | Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks. |=== An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles. @@ -370,9 +387,10 @@ Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the === Roles ''' +[[All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name]] [cols="2v,v"] |=== -| **Rule** +| <<All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name, Rule>> | All tasks in a role SHOULD be tagged with the role name. |=== @@ -395,9 +413,10 @@ This is very useful when developing and debugging new tasks. It can also signifi ''' +[[The-Ansible-roles-directory-MUST-maintain-a-flat-structure]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-roles-directory-MUST-maintain-a-flat-structure, Rule>> | The Ansible roles directory MUST maintain a flat structure. |=== @@ -410,9 +429,10 @@ This is very useful when developing and debugging new tasks.
It can also signifi * Make it compatible with Ansible Galaxy ''' +[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>> | Ansible Roles SHOULD be named like technology_component[_subcomponent]. |=== @@ -430,9 +450,10 @@ Many times the `technology` portion of the pattern will line up with a package n * http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters] ''' +[[The-default-filter-SHOULD-replace-empty-strings-lists-etc]] [cols="2v,v"] |=== -| **Rule** +| <<The-default-filter-SHOULD-replace-empty-strings-lists-etc, Rule>> | The `default` filter SHOULD replace empty strings, lists, etc. |=== @@ -469,15 +490,17 @@ This is almost always more desirable than an empty list, string, etc. === Yum and DNF ''' +[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]] [cols="2v,v"] |=== -| **Rule** +| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>> | Package installation MUST use ansible action module to abstract away dnf/yum. -| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively. |=== + +[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]] [cols="2v,v"] |=== -| **Rule** +| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>> | Package installation MUST use name= and state=present rather than pkg= and state=installed respectively. |=== -- cgit v1.2.3 From e99eda725ba65eeb0d1c13ee1bd3e8737b9d3602 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 14 Jan 2016 11:29:59 -0500 Subject: Check api prior to starting node. --- playbooks/common/openshift-node/config.yml | 4 ---- roles/openshift_node/tasks/main.yml | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 336cbed5e..8d0c4945e 100--- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -229,9 +229,5 @@ delay: 1 changed_when: false when: openshift.common.is_containerized | bool - - fail: - msg: > - Unable to contact master API at {{ openshift.master.api_url }} - when: openshift.common.is_containerized | bool and api_available_output.stdout.find("200 OK") == -1 roles: - openshift_manage_node diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 0828d8e2c..9035248f9 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -103,6 +103,21 @@ - name: Additional storage plugin configuration include: storage_plugins/main.yml +# Necessary because when you're on a node that's also a master the master will be +# restarted after the node restarts docker and it will take up to 60 seconds for +# systemd to start the master again +- name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information.
+ command: > + curl -k --head --silent {{ openshift_node_master_api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false + when: openshift.common.is_containerized | bool + - name: Start and enable node service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: start_result -- cgit v1.2.3 From f374aa24e9a161331a52a822b373d196a35037aa Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Thu, 14 Jan 2016 13:58:22 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.31-1]. --- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 5d4f58c88..95696495d 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.30-1 ./ +3.0.31-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 59c780a31..96710cee2 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.30 +Version: 3.0.31 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,10 @@ Atomic OpenShift Utilities includes %changelog +* Thu Jan 14 2016 Brenton Leanhardt 3.0.31-1 +- Check api prior to starting node. (abutcher@redhat.com) +- added anchors (twiest@redhat.com) + * Wed Jan 13 2016 Joel Diaz 3.0.30-1 - Add -A and detail --v3 flags -- cgit v1.2.3 From 176cfb5c4b8cee5b018b40c4c4b1b2042e1cfa8d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 14 Jan 2016 14:19:03 -0500 Subject: Uninstall remove containerized wrapper and symlinks --- playbooks/adhoc/uninstall.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index ac20f5f9b..36d686c8b 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -202,6 +202,10 @@ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service - /usr/lib/systemd/system/origin-master-controllers.service + - /usr/local/bin/openshift + - /usr/local/bin/oadm + - /usr/local/bin/oc + - /usr/local/bin/kubectl # Since we are potentially removing the systemd unit files for separated # master-api and master-controllers services, so we need to reload the -- cgit v1.2.3 From d98926d37411b09f77ac3f053f0c2e5e26122f3b Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Thu, 14 Jan 2016 14:21:54 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.32-1]. 
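This release picks up the uninstall change above. Sketched in Python, the newly listed paths (presumably consumed by a file module loop with state=absent in uninstall.yml) amount to the following cleanup; the function name is an assumption for illustration, not playbook code.

    import os

    # The four paths added to the uninstall list above.
    CLI_WRAPPER_PATHS = [
        '/usr/local/bin/openshift',
        '/usr/local/bin/oadm',
        '/usr/local/bin/oc',
        '/usr/local/bin/kubectl',
    ]

    def remove_cli_wrapper(paths=CLI_WRAPPER_PATHS):
        # state=absent semantics: remove the wrapper and its symlinks,
        # treating already-missing paths as success (idempotent).
        for path in paths:
            try:
                os.unlink(path)
            except FileNotFoundError:
                pass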
--- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 95696495d..d85882bf9 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.31-1 ./ +3.0.32-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 96710cee2..c8f6a2673 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.31 +Version: 3.0.32 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,9 @@ Atomic OpenShift Utilities includes %changelog +* Thu Jan 14 2016 Brenton Leanhardt 3.0.32-1 +- Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com) + * Thu Jan 14 2016 Brenton Leanhardt 3.0.31-1 - Check api prior to starting node. (abutcher@redhat.com) - added anchors (twiest@redhat.com) -- cgit v1.2.3 From c5d4f207b0f77048868fa946048f0f10d40fc827 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 14 Jan 2016 14:49:23 -0500 Subject: Update ec2.ini - Set rds and elasticache to False, since they are not needed for the playbooks and cause issues for users without those services enabled. --- inventory/aws/hosts/ec2.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini index 1f503b8cf..aa0f9090f 100644 --- a/inventory/aws/hosts/ec2.ini +++ b/inventory/aws/hosts/ec2.ini @@ -45,10 +45,10 @@ vpc_destination_variable = ip_address route53 = False # To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False +rds = False # To exclude ElastiCache instances from the inventory, uncomment and set to False. -#elasticache = False +elasticache = False # Additionally, you can specify the list of zones to exclude looking up in # 'route53_excluded_zones' as a comma-separated list. -- cgit v1.2.3 From 8eaac9ee3f8a8f5e903ac79b4a3bc5e5b7d440c7 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 14 Jan 2016 16:47:56 -0500 Subject: Ensure nfs-utils installed for non-atomic hosts. --- roles/openshift_node/tasks/storage_plugins/nfs.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 1edf21d9b..14a613786 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,4 +1,8 @@ --- +- name: Install NFS storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + when: not openshift.common.is_atomic | bool + - name: Set seboolean to allow nfs storage plugin access from containers seboolean: name: virt_use_nfs -- cgit v1.2.3 From 0f2ec2a8412eab9b7fd8f1411aa969f955158a3e Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Fri, 15 Jan 2016 10:16:06 -0500 Subject: Added anchors for rules in style_guide.adoc in order to make it easier to reference specific rules in PRs. --- docs/style_guide.adoc | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc index 09d4839c7..72eaedcf9 100644 --- a/docs/style_guide.adoc +++ b/docs/style_guide.adoc @@ -19,9 +19,10 @@ This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. 
* https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length] ''' +[[All-lines-SHOULD-be-no-longer-than-80-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-SHOULD-be-no-longer-than-80-characters, Rule>> | All lines SHOULD be no longer than 80 characters. |=== @@ -31,9 +32,10 @@ Code readability is subjective, therefore pull-requests SHOULD still be merged, ''' +[[All-lines-MUST-be-no-longer-than-120-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-MUST-be-no-longer-than-120-characters, Rule>> | All lines MUST be no longer than 120 characters. |=== @@ -46,9 +48,10 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di === Ansible Yaml file extension ''' +[[All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc]] [cols="2v,v"] |=== -| **Rule** +| <<All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc, Rule>> | All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc). |=== @@ -59,9 +62,10 @@ Example: `tasks.yml` === Ansible CLI Variables ''' +[[Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli]] [cols="2v,v"] |=== -| **Rule** +| <<Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli, Rule>> | Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_ |=== @@ -76,9 +80,10 @@ ansible-playbook -e cli_foo=bar someplays.yml === Ansible Global Variables ''' +[[Global-variables-MUST-have-a-prefix-of-g]] [cols="2v,v"] |=== -| **Rule** +| <<Global-variables-MUST-have-a-prefix-of-g, Rule>> | Global variables MUST have a prefix of g_ |=== Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc. @@ -94,9 +99,10 @@ g_environment: someval Ansible role variables are defined as variables contained in (or passed into) a role. ''' +[[Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules]] [cols="2v,v"] |=== -| **Rule** +| <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>> | Role variables MUST have a prefix of at least 3 characters. See below for specific naming rules. |=== -- cgit v1.2.3 From 8c2ef6e1192006fea958e277cef5e8d9672476a3 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 15 Jan 2016 10:44:54 -0500 Subject: Call attention to openshift_master_rolling_restart_mode variable in restart prompt. --- playbooks/common/openshift-master/restart.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index fa13a64cb..987fae63c 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -57,8 +57,10 @@ Warning: Running playbook from a host that will be restarted! Press CTRL+C and A to abort playbook execution. You may continue by pressing ENTER but the playbook will stop - executing once this system restarts and services must be - manually verified. + executing after this system has been restarted and services + must be verified manually. To only restart services, set + openshift_master_rolling_restart_mode=services in host + inventory and relaunch the playbook. when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' - set_fact: current_host: "{{ exists.stat.exists }}" -- cgit v1.2.3 From 33ac044082ebdeea44f8c6e58450e580311f319d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 15 Jan 2016 13:11:57 -0500 Subject: Configure nodes which are also masters prior to nodes in containerized install.
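A rough Python sketch of the grouping this change introduces, loosely mirroring the group names in the diff below; the function itself is an illustrative assumption, not playbook code.

    def split_node_groups(nodes_to_config, masters_to_config, is_containerized):
        # Nodes that are also masters and run containerized get their own
        # group so they can be configured serially before the other nodes.
        containerized_master_nodes = [
            host for host in nodes_to_config
            if is_containerized.get(host, False) and host in masters_to_config
        ]
        other_nodes = [
            host for host in nodes_to_config
            if host not in containerized_master_nodes
        ]
        return containerized_master_nodes, other_nodes

    # A containerized combined master/node sorts into the serial group.
    assert split_node_groups(
        ['master1', 'node1'], ['master1'],
        {'master1': True, 'node1': False}) == (['master1'], ['node1'])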
--- playbooks/common/openshift-node/config.yml | 46 ++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 8d0c4945e..1d31657ed 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -154,21 +154,15 @@ validate_checksum: yes with_items: nodes_needing_certs -- name: Configure node instances +- name: Deploy node certificates hosts: oo_nodes_to_config vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - # TODO: Prefix flannel role variables. - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - pre_tasks: + tasks: - name: Ensure certificate directory exists file: path: "{{ node_cert_dir }}" state: directory - # TODO: notify restart node # possibly test service started time against certificate/config file # timestamps in node to trigger notify @@ -177,8 +171,44 @@ src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz" dest: "{{ node_cert_dir }}" when: certs_missing + +- name: Evaluate node groups + hosts: localhost + become: no + tasks: + - name: Evaluate oo_containerized_master_nodes + add_host: + name: "{{ item }}" + groups: oo_containerized_master_nodes + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_nodes_to_config | default([]) }}" + when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + +- name: Configure node instances + hosts: oo_containerized_master_nodes + serial: 1 + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + roles: + - openshift_node + +- name: Configure node instances + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" roles: - openshift_node + +- name: Additional node config + hosts: oo_nodes_to_config + vars: + # TODO: Prefix flannel role variables. + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + roles: - role: flannel when: openshift.common.use_flannel | bool - role: nickhammond.logrotate -- cgit v1.2.3 From bb1f8aa029078238cf32b038c8bffacb13c3765e Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Fri, 15 Jan 2016 15:17:50 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.33-1]. 
--- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index d85882bf9..b94e78aea 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.32-1 ./ +3.0.33-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index c8f6a2673..1cc350b81 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.32 +Version: 3.0.33 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,15 @@ Atomic OpenShift Utilities includes %changelog +* Fri Jan 15 2016 Brenton Leanhardt 3.0.33-1 +- Configure nodes which are also masters prior to nodes in containerized + install. (abutcher@redhat.com) +- Call attention to openshift_master_rolling_restart_mode variable in restart + prompt. (abutcher@redhat.com) +- Added anchors for rules in style_guide.adoc in order to make it easier to + reference specific rules in PRs. (twiest@redhat.com) +- Update ec2.ini (jdetiber@redhat.com) + * Thu Jan 14 2016 Brenton Leanhardt 3.0.32-1 - Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com) -- cgit v1.2.3 From c468a080c78de8bc2ef67d4d1647792fdd4e566c Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 15 Jan 2016 15:34:26 -0500 Subject: Remove pause after haproxy start --- roles/haproxy/handlers/main.yml | 1 + roles/haproxy/tasks/main.yml | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml index ee60adcab..5b8691b26 100644 --- a/roles/haproxy/handlers/main.yml +++ b/roles/haproxy/handlers/main.yml @@ -3,3 +3,4 @@ service: name: haproxy state: restarted + when: not (haproxy_start_result_changed | default(false) | bool) diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index 97f870829..0b8370ce2 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ -19,6 +19,5 @@ enabled: yes register: start_result -- name: Pause 30 seconds if haproxy was just started - pause: seconds=30 - when: start_result | changed +- set_fact: + haproxy_start_result_changed: "{{ start_result | changed }}" -- cgit v1.2.3 From 622231f28d5e997bc918ba8d0a2c4a0248f07655 Mon Sep 17 00:00:00 2001 From: Donovan Muller Date: Mon, 18 Jan 2016 13:00:41 +0200 Subject: Host group should be OSEv3 not OSv3 Following on from #1107 the host group's name is OSEv3 and not OSv3 --- playbooks/adhoc/bootstrap-fedora.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml index de9f36c8a..0df77e309 100644 --- a/playbooks/adhoc/bootstrap-fedora.yml +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -1,4 +1,4 @@ -- hosts: OSv3 +- hosts: OSEv3 gather_facts: false tasks: - name: install python and deps for ansible modules -- cgit v1.2.3 From 38f13a5663db409bc9552eeae817d8c24dc0ae97 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Mon, 18 Jan 2016 08:45:06 -0500 Subject: atomic-openshift-installer: Remove containerized install for 3.0 This removes the option to specify a containerized install when installing 3.0 in interactive mode. 
--- utils/src/ooinstall/cli_installer.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 4e30929da..1aacf3a4b 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -127,14 +127,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen masters_set = True host_props['node'] = True - #TODO: Reenable this option once container installs are out of tech preview - rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', - type=click.Choice(['rpm', 'container']), - default='rpm') - if rpm_or_container == 'container': - host_props['containerized'] = True - else: - host_props['containerized'] = False + host_props['containerized'] = False + if oo_cfg.settings['variant_version'] != '3.0': + rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', + type=click.Choice(['rpm', 'container']), + default='rpm') + if rpm_or_container == 'container': + host_props['containerized'] = True if existing_env: host_props['new_host'] = True -- cgit v1.2.3 From f85591ac515043a4d91bbd1d22c5cb63bdcfeb9b Mon Sep 17 00:00:00 2001 From: Donovan Muller Date: Mon, 18 Jan 2016 16:56:35 +0200 Subject: Use IdentityFile instead of PrivateKey Fixes #1196 --- README_AWS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_AWS.md b/README_AWS.md index f8ecaec49..c605de43d 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts. To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS. ``` Host *.compute-1.amazonaws.com - PrivateKey $HOME/.ssh/my_private_key.pem + IdentityFile $HOME/.ssh/my_private_key.pem ``` Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances. -- cgit v1.2.3 From cc7b1a3f585a86c21eb88d850d963148b98c9f23 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Mon, 18 Jan 2016 10:29:48 -0500 Subject: Fix cluster_method conditional in master restart playbook. --- playbooks/common/openshift-master/restart.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 987fae63c..5c8e34817 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -73,12 +73,12 @@ command: > systemctl is-active {{ openshift.common.service_type }}-master register: active_check_output - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method | default(None) == 'pacemaker' failed_when: active_check_output.stdout not in ['active', 'inactive'] changed_when: false - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method | default(None) == 'pacemaker' - name: Evaluate master groups hosts: localhost -- cgit v1.2.3 From 5273611da653bca4a15cfe57e4acc57aef089a37 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Mon, 18 Jan 2016 10:44:37 -0500 Subject: Add 'unknown' to possible output for the is-active check. 
--- playbooks/common/openshift-master/restart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 5c8e34817..d9d857b1a 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -74,7 +74,7 @@ systemctl is-active {{ openshift.common.service_type }}-master register: active_check_output when: openshift.master.cluster_method | default(None) == 'pacemaker' - failed_when: active_check_output.stdout not in ['active', 'inactive'] + failed_when: active_check_output.stdout not in ['active', 'inactive', 'unknown'] changed_when: false - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" -- cgit v1.2.3 From eb6cb4ff6cdd050bf20386a948d03814f069fe5a Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Mon, 18 Jan 2016 11:13:22 -0500 Subject: atomic-openshift-installer: add containerized to inventory Updates to actually check the containerized setting and add it to the inventory. --- utils/src/ooinstall/openshift_ansible.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 20401f812..c0d115fdc 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -128,6 +128,8 @@ def write_host(host, inventory, schedulable=None): facts += ' openshift_hostname={}'.format(host.hostname) if host.public_hostname: facts += ' openshift_public_hostname={}'.format(host.public_hostname) + if host.containerized: + facts += ' containerized={}'.format(host.containerized) # TODO: For not write_host is handles both master and nodes. # Technically only nodes will ever need this. 
-- cgit v1.2.3 From 739a3af877fb1728b8ad1838aa524e089c7f5ffd Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Mon, 18 Jan 2016 11:03:23 -0500 Subject: clean up too-many-branches / logic --- roles/lib_zabbix/library/zbx_action.py | 147 ++++++++++++++++++++------------- 1 file changed, 91 insertions(+), 56 deletions(-) diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index c08bef4f7..2f9524556 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -81,6 +81,61 @@ def filter_differences(zabbix_filters, user_filters): return rval +def opconditions_diff(zab_val, user_val): + ''' Report whether there are differences between opconditions on + zabbix and opconditions supplied by user ''' + + if len(zab_val) != len(user_val): + return True + + for z_cond, u_cond in zip(zab_val, user_val): + if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ + ['conditiontype', 'operator', 'value']]): + return True + + return False + +def opmessage_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage on + zabbix and opmessage supplied by user ''' + + for op_msg_key, op_msg_val in user_val.items(): + if zab_val[op_msg_key] != str(op_msg_val): + return True + + return False + +def opmessage_grp_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_grp + on zabbix and opmessage_grp supplied by user ''' + + zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val]) + usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val]) + if usr_grp_ids != zab_grp_ids: + return True + + return False + +def opmessage_usr_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_usr + on zabbix and opmessage_usr supplied by user ''' + + zab_usr_ids = set([usr['usrid'] for usr in zab_val]) + usr_ids = set([usr['usrid'] for usr in user_val]) + if usr_ids != zab_usr_ids: + return True + + return False + +def opcommand_diff(zab_op_cmd, usr_op_cmd): + ''' Check whether user-provided opcommand matches what's already + stored in Zabbix ''' + + for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): + if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): + return True + return False + def host_in_zabbix(zab_hosts, usr_host): ''' Check whether a particular user host is already in the Zabbix list of hosts ''' @@ -106,23 +161,11 @@ def hostlist_in_zabbix(zab_hosts, usr_hosts): return True -def opcommand_diff(zab_op_cmd, usr_op_cmd): - ''' Check whether user-provided opcommand matches what's already - stored in Zabbix ''' - - for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): - if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): - return True - return False - -# This logic is quite complex. We are comparing two lists of dictionaries. -# The outer for-loops allow us to descend down into both lists at the same time -# and then walk over the key,val pairs of the incoming user dict's changes -# or updates. The if-statements are looking at different sub-object types and -# comparing them. The other suggestion on how to write this is to write a recursive -# compare function but for the time constraints and for complexity I decided to go -# this route. -# pylint: disable=too-many-branches +# We are comparing two lists of dictionaries (the one stored on zabbix and the +# one the user is providing). For each type of operation, determine whether there +# is a difference between what is stored on zabbix and what the user is providing. 
+# If there is a difference, we take the user-provided data for what needs to +# be stored/updated into zabbix. def operation_differences(zabbix_ops, user_ops): '''Determine the differences from user and zabbix for operations''' @@ -132,49 +175,41 @@ def operation_differences(zabbix_ops, user_ops): rval = {} for zab, user in zip(zabbix_ops, user_ops): - for key, val in user.items(): - if key == 'opconditions': - if len(zab[key]) != len(val): - rval[key] = val - break - for z_cond, u_cond in zip(zab[key], user[key]): - if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ - ['conditiontype', 'operator', 'value']]): - rval[key] = val - break - elif key == 'opmessage': - # Verify each passed param matches - for op_msg_key, op_msg_val in val.items(): - if zab[key][op_msg_key] != str(op_msg_val): - rval[key] = val - break - - elif key == 'opmessage_grp': - zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]]) - usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val]) - if usr_grp_ids != zab_grp_ids: - rval[key] = val - - elif key == 'opmessage_usr': - zab_usr_ids = set([usr['userid'] for usr in zab[key]]) - usr_ids = set([usr['userid'] for usr in val]) - if usr_ids != zab_usr_ids: - rval[key] = val - - elif key == 'opcommand': - if opcommand_diff(zab[key], val): - rval[key] = val - break + for oper in user.keys(): + if oper == 'opconditions' and opconditions_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage' and opmessage_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opcommand' and opcommand_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] # opcommand_grp can be treated just like opcommand_hst # as opcommand_grp[] is just a list of groups - elif key == 'opcommand_hst' or key == 'opcommand_grp': - if not hostlist_in_zabbix(zab[key], val): - rval[key] = val - break + elif oper == 'opcommand_hst' or oper == 'opcommand_grp': + if not hostlist_in_zabbix(zab[oper], user[oper]): + rval[oper] = user[oper] + + # if it's any other type of operation than the ones tested above + # just do a direct compare + elif oper not in ['opconditions', 'opmessage', 'opmessage_grp', + 'opmessage_usr', 'opcommand', 'opcommand_hst', + 'opcommand_grp'] \ + and str(zab[oper]) != str(user[oper]): + rval[oper] = user[oper] - elif zab[key] != str(val): - rval[key] = val return rval def get_users(zapi, users): -- cgit v1.2.3 From b0511f962acdf3211b9b628afc9f8a969b79e834 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Mon, 18 Jan 2016 14:51:36 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.34-1]. 
--- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index b94e78aea..d397467cb 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.33-1 ./ +3.0.34-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 1cc350b81..37117feac 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.33 +Version: 3.0.34 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,21 @@ Atomic OpenShift Utilities includes %changelog +* Mon Jan 18 2016 Brenton Leanhardt 3.0.34-1 +- clean up too-many-branches / logic (jdiaz@redhat.com) +- atomic-openshift-installer: add containerized to inventory + (smunilla@redhat.com) +- Add 'unknown' to possible output for the is-active check. + (abutcher@redhat.com) +- Fix cluster_method conditional in master restart playbook. + (abutcher@redhat.com) +- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com) +- atomic-openshift-installer: Remove containerized install for 3.0 + (smunilla@redhat.com) +- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com) +- Remove pause after haproxy start (abutcher@redhat.com) +- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com) + * Fri Jan 15 2016 Brenton Leanhardt 3.0.33-1 - Configure nodes which are also masters prior to nodes in containerized install. (abutcher@redhat.com) -- cgit v1.2.3 From 5fa0543baaf0403b4a8fb4ccfae3b5723baa8ae1 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Mon, 18 Jan 2016 15:01:45 -0500 Subject: added oso_moniotoring tools role --- playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 4 +- roles/oso_monitoring_tools/README.md | 54 +++++++++++++++++++++++ roles/oso_monitoring_tools/defaults/main.yml | 2 + roles/oso_monitoring_tools/handlers/main.yml | 2 + roles/oso_monitoring_tools/meta/main.yml | 8 ++++ roles/oso_monitoring_tools/tasks/main.yml | 17 +++++++ roles/oso_monitoring_tools/vars/main.yml | 12 +++++ 7 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 roles/oso_monitoring_tools/README.md create mode 100644 roles/oso_monitoring_tools/defaults/main.yml create mode 100644 roles/oso_monitoring_tools/handlers/main.yml create mode 100644 roles/oso_monitoring_tools/meta/main.yml create mode 100644 roles/oso_monitoring_tools/tasks/main.yml create mode 100644 roles/oso_monitoring_tools/vars/main.yml diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index 174cea460..d24e9cafa 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -20,7 +20,7 @@ # ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml # # Notes: -# * By default this will do a 55GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable +# * By default this will do a 200GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable # * This does a GP2 by default. Support for Provisioned IOPS has not been added # * This will assign the new volume to /dev/xvdc. This is not variablized, yet. 
# * This can be done with NO downtime on the host @@ -36,7 +36,7 @@ vars: cli_volume_type: gp2 - cli_volume_size: 55 + cli_volume_size: 200 # cli_volume_iops: "{{ 30 * cli_volume_size }}" pre_tasks: diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md new file mode 100644 index 000000000..4215f9eeb --- /dev/null +++ b/roles/oso_monitoring_tools/README.md @@ -0,0 +1,54 @@ +Role Name +========= + +This role will install the Openshift Monitoring Utilities + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +osomt_zagg_client_config + +from vars/main.yml: + +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" + +Dependencies +------------ + +None + +Example Playbook +---------------- + +- role: "oso_monitoring_tools" + osomt_host_name: hostname + osomt_zagg_url: http://path.to/zagg_web + osomt_zagg_user: admin + osomt_zagg_password: password + osomt_zagg_ssl_verify: True + osomt_zagg_verbose: False + osomt_zagg_debug: False + +License +------- + +BSD + +Author Information +------------------ + +Openshift Operations diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml new file mode 100644 index 000000000..a17424f25 --- /dev/null +++ b/roles/oso_monitoring_tools/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml new file mode 100644 index 000000000..cefa780ab --- /dev/null +++ b/roles/oso_monitoring_tools/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml new file mode 100644 index 000000000..9c42b68dc --- /dev/null +++ b/roles/oso_monitoring_tools/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: OpenShift Operations + description: Install Openshift Monitoring tools + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml new file mode 100644 index 000000000..b165f9a45 --- /dev/null +++ b/roles/oso_monitoring_tools/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for oso_monitoring_tools +- name: Install the Openshift Tools RPMS + yum: + name: "{{ item }}" + state: latest + with_items: + - openshift-tools-scripts-monitoring-zagg-client + - python-openshift-tools-monitoring-zagg + +- debug: var=g_zagg_client_config + +- name: Generate the /etc/openshift_tools/zagg_client.yaml config file + copy: + content: "{{ osomt_zagg_client_config | to_nice_yaml }}" + dest: /etc/openshift_tools/zagg_client.yaml + mode: "644" diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml new file mode 100644 index 000000000..3538ba30b --- /dev/null +++ b/roles/oso_monitoring_tools/vars/main.yml @@ -0,0 +1,12 @@ +--- +# vars file for oso_monitoring_tools +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + 
zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" -- cgit v1.2.3