From e5f4823d6e4191367178f743ddd5e0885598e8cf Mon Sep 17 00:00:00 2001
From: Russell Teague <rteague@redhat.com>
Date: Fri, 3 Nov 2017 17:28:45 -0400
Subject: Playbook Consolidation - Initialization
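
The init playbooks are intended to be included from the entry-point
playbooks rather than run piecemeal. A minimal sketch of a caller (the
playbook path is hypothetical):

    ---
    # playbooks/deploy_cluster.yml (illustrative caller)
    - include: init/main.yml
    # ... component install/config plays follow ...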

---
 playbooks/init/evaluate_groups.yml    | 193 ++++++++++++++++++++++++++++++++++
 playbooks/init/facts.yml              | 169 +++++++++++++++++++++++++++++
 playbooks/init/main.yml               |  38 +++++++
 playbooks/init/repos.yml              |   8 ++
 playbooks/init/roles                  |   1 +
 playbooks/init/sanity_checks.yml      |  51 +++++++++
 playbooks/init/validate_hostnames.yml |  23 ++++
 playbooks/init/vars/cluster_hosts.yml |  26 +++++
 playbooks/init/version.yml            |  21 ++++
 9 files changed, 530 insertions(+)
 create mode 100644 playbooks/init/evaluate_groups.yml
 create mode 100644 playbooks/init/facts.yml
 create mode 100644 playbooks/init/main.yml
 create mode 100644 playbooks/init/repos.yml
 create mode 120000 playbooks/init/roles
 create mode 100644 playbooks/init/sanity_checks.yml
 create mode 100644 playbooks/init/validate_hostnames.yml
 create mode 100644 playbooks/init/vars/cluster_hosts.yml
 create mode 100644 playbooks/init/version.yml


diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
new file mode 100644
index 000000000..8787c87e1
--- /dev/null
+++ b/playbooks/init/evaluate_groups.yml
@@ -0,0 +1,193 @@
+---
+- name: Populate config host groups
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - name: Load group name mapping variables
+    include_vars: vars/cluster_hosts.yml
+
+  - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
+    fail:
+      msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+    when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
+
+  - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+    fail:
+      msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
+    when: g_master_hosts is not defined and g_new_master_hosts is not defined
+
+  - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+    fail:
+      msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
+    when: g_node_hosts is not defined and g_new_node_hosts is not defined
+
+  - name: Evaluate groups - g_lb_hosts required
+    fail:
+      msg: This playbook requires g_lb_hosts to be set
+    when: g_lb_hosts is not defined
+
+  - name: Evaluate groups - g_nfs_hosts required
+    fail:
+      msg: This playbook requires g_nfs_hosts to be set
+    when: g_nfs_hosts is not defined
+
+  - name: Evaluate groups - g_nfs_hosts is single host
+    fail:
+      msg: The nfs group must be limited to one host
+    when: g_nfs_hosts | default([]) | length > 1
+
+  - name: Evaluate groups - g_glusterfs_hosts required
+    fail:
+      msg: This playbook requires g_glusterfs_hosts to be set
+    when: g_glusterfs_hosts is not defined
+
+  - name: Evaluate groups - Fail if no etcd hosts group is defined
+    fail:
+      msg: >
+        Running etcd as an embedded service is no longer supported. If this is a
+        new install please define an 'etcd' group with either one or three
+        hosts. These hosts may be the same hosts as your masters. If this is an
+        upgrade you may set openshift_master_unsupported_embedded_etcd=true
+        until a migration playbook becomes available.
+    when:
+    - g_etcd_hosts | default([]) | length not in [3,1]
+    - not openshift_master_unsupported_embedded_etcd | default(False)
+    - not (openshift_node_bootstrap | default(False))
+
+  - name: Evaluate oo_all_hosts
+    add_host:
+      name: "{{ item }}"
+      groups: oo_all_hosts
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_all_hosts | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_masters
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ g_master_hosts[0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    when: g_master_hosts|length > 0
+    changed_when: no
+
+  - name: Evaluate oo_new_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_new_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_new_etcd_hosts | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+    changed_when: no
+
+  - name: Evaluate oo_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_etcd_hosts | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_first_etcd
+    add_host:
+      name: "{{ g_etcd_hosts[0] }}"
+      groups: oo_first_etcd
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    when: g_etcd_hosts|length > 0
+    changed_when: no
+
+  # We use two groups: one for the hosts we're upgrading, which excludes any
+  # embedded etcd host, and one for backups, which includes it. There is no
+  # need to upgrade embedded etcd separately; that happens when the master is
+  # updated.
+  - name: Evaluate oo_etcd_hosts_to_upgrade
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_hosts_to_upgrade
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+    changed_when: False
+
+  - name: Evaluate oo_etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_hosts_to_backup
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
+    changed_when: False
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+    changed_when: no
+
+  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set
+  - name: Add master to oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_master_hosts | default([]) }}"
+    when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
+    changed_when: no
+
+  - name: Evaluate oo_lb_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_lb_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_lb_hosts | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_nfs_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nfs_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_nfs_hosts | default([]) }}"
+    changed_when: no
+
+  - name: Evaluate oo_glusterfs_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_glusterfs_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
+    changed_when: no
+
+  - name: Evaluate oo_etcd_to_migrate
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_migrate
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
+    changed_when: no
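
Note: this play only builds in-memory groups with add_host; nothing is
changed on the target hosts. Later plays consume the oo_* groups, e.g.
(an illustrative consumer):

    - name: Example play targeting the evaluated masters
      hosts: oo_masters_to_config
      gather_facts: no
      tasks:
      - debug:
          msg: "{{ inventory_hostname }} will be configured as a master"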
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
new file mode 100644
index 000000000..91223d368
--- /dev/null
+++ b/playbooks/init/facts.yml
@@ -0,0 +1,169 @@
+---
+- name: Ensure that all non-node hosts are accessible
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
+  any_errors_fatal: true
+  tasks:
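+  # No tasks are needed here; the implicit fact gathering is enough to
+  # verify that each host is reachable.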
+
+- name: Initialize host facts
+  hosts: oo_all_hosts
+  tasks:
+  - name: load openshift_facts module
+    include_role:
+      name: openshift_facts
+    static: yes
+
+  # TODO: Should this role be refactored into health_checks??
+  - name: Run openshift_sanitize_inventory to set variables
+    include_role:
+      name: openshift_sanitize_inventory
+
+  - name: Detect operating system from ostree_booted
+    stat:
+      path: /run/ostree-booted
+    register: ostree_booted
+
+  # Locally set up containerized facts for now
+  - name: initialize_facts set fact l_is_atomic
+    set_fact:
+      l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+  - name: initialize_facts set fact for containerized and l_is_*_system_container
+    set_fact:
+      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+      l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+  - name: initialize_facts set facts for l_any_system_container
+    set_fact:
+      l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+  - name: initialize_facts set fact for l_etcd_runtime
+    set_fact:
+      l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - Fedora requires Python 3
+    fail:
+      msg: |
+        openshift-ansible requires Python 3 for {{ ansible_distribution }};
+        For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+    when:
+    - ansible_distribution == 'Fedora'
+    - ansible_python['version']['major'] != 3
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - non-Fedora distributions require Python 2
+    fail:
+      msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+    when:
+    - ansible_distribution != 'Fedora'
+    - ansible_python['version']['major'] != 2
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  # Fail as early as possible if Atomic and old version of Docker
+  - when:
+    - l_is_atomic | bool
+    block:
+
+    # See https://access.redhat.com/articles/2317361
+    # and https://github.com/ansible/ansible/issues/15892
+    # NOTE: the "'s can not be removed at this level else the docker command will fail
+    # NOTE: When ansible >2.2.1.x is used this can be updated per
+    # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+    - name: Determine Atomic Host Docker Version
+      shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+      register: l_atomic_docker_version
+
+    - name: assert atomic host docker version is 1.12 or later
+      assert:
+        that:
+        - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+  - when:
+    - not l_is_atomic | bool
+    block:
+    - name: Ensure openshift-ansible installer package deps are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - iproute
+      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+      - yum-utils
+
+    - name: Ensure various deps for running system containers are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - atomic
+      - ostree
+      - runc
+      when:
+      - l_any_system_container | bool
+
+  - name: Default system_images_registry to the enterprise registry
+    set_fact:
+      system_images_registry: "registry.access.redhat.com"
+    when:
+    - system_images_registry is not defined
+    - openshift_deployment_type == "openshift-enterprise"
+
+  - name: Default system_images_registry to the community registry
+    set_fact:
+      system_images_registry: "docker.io"
+    when:
+    - system_images_registry is not defined
+    - openshift_deployment_type == "origin"
+
+  - name: Gather Cluster facts and set is_containerized if needed
+    openshift_facts:
+      role: common
+      local_facts:
+        deployment_type: "{{ openshift_deployment_type }}"
+        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+        cli_image: "{{ osm_image | default(None) }}"
+        hostname: "{{ openshift_hostname | default(None) }}"
+        ip: "{{ openshift_ip | default(None) }}"
+        is_containerized: "{{ l_is_containerized | default(None) }}"
+        is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+        is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+        is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+        is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+        etcd_runtime: "{{ l_etcd_runtime }}"
+        system_images_registry: "{{ system_images_registry }}"
+        public_hostname: "{{ openshift_public_hostname | default(None) }}"
+        public_ip: "{{ openshift_public_ip | default(None) }}"
+        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+        http_proxy: "{{ openshift_http_proxy | default(None) }}"
+        https_proxy: "{{ openshift_https_proxy | default(None) }}"
+        no_proxy: "{{ openshift_no_proxy | default(None) }}"
+        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+  - name: Set fact of no_proxy_internal_hostnames
+    openshift_facts:
+      role: common
+      local_facts:
+        no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                             | union(groups['oo_masters_to_config'])
+                                             | union(groups['oo_etcd_to_config'] | default([])))
+                                         | oo_collect('openshift.common.hostname') | default([]) | join(',')
+                                         }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+  - name: initialize_facts set_fact repoquery command
+    set_fact:
+      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+  - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+    set_fact:
+      openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
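
Note: the facts recorded above are stored under the openshift.common
namespace by the openshift_facts module and read back by later plays, as
the last task here already does for portal_net. An illustrative lookup:

    - debug:
        msg: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"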
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
new file mode 100644
index 000000000..87ffeafc7
--- /dev/null
+++ b/playbooks/init/main.yml
@@ -0,0 +1,38 @@
+---
+- name: Initialization Checkpoint Start
+  hosts: all
+  gather_facts: false
+  roles:
+  - installer_checkpoint
+  tasks:
+  - name: Set install initialization 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_initialize:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- include: evaluate_groups.yml
+
+- include: facts.yml
+
+- include: sanity_checks.yml
+
+- include: validate_hostnames.yml
+
+- include: repos.yml
+
+- include: version.yml
+
+- name: Initialization Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set install initialization 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_initialize:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
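
Note: every phase is bracketed by the same Start/End pattern; a new phase
would add its own set_stats entry (the installer_phase_example key is
hypothetical):

    - name: Set example phase 'In Progress'
      run_once: true
      set_stats:
        data:
          installer_phase_example:
            status: "In Progress"
            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"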
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/init/repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+  hosts: oo_all_hosts
+  gather_facts: no
+  tasks:
+  - name: initialize openshift repos
+    include_role:
+      name: openshift_repos
diff --git a/playbooks/init/roles b/playbooks/init/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/init/roles
@@ -0,0 +1 @@
+../../roles
\ No newline at end of file
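
Note: the roles symlink points back at the repository's top-level roles/
directory so that include_role calls in these playbooks resolve without any
extra roles_path configuration; Ansible searches a roles/ directory adjacent
to the playbook by default.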
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/init/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: Flannel cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use flannel
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+  - fail:
+      msg: Nuage SDN cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use nuage
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Nuage SDN cannot be used with flannel
+    when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Contiv cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use contiv
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Contiv cannot be used with flannel
+    when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Contiv cannot be used with nuage
+    when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Calico cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Calico
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+  - fail:
+      msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+    when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+  - fail:
+      msg: Calico cannot be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both.
+    when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Calico cannot be used with Contiv in OpenShift. Set either openshift_use_calico or openshift_use_contiv, but not both.
+    when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: openshift_hostname must be 63 characters or less
+    when: openshift_hostname is defined and openshift_hostname | length > 63
+
+  - fail:
+      msg: openshift_public_hostname must be 63 characters or less
+    when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
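
Note: the SDN checks above enforce that at most one network plugin is
enabled at a time. Choosing an alternative plugin requires disabling the
default explicitly, e.g. in the inventory (values illustrative):

    [OSEv3:vars]
    openshift_use_openshift_sdn=false
    openshift_use_flannel=true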
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
new file mode 100644
index 000000000..be2e6a15a
--- /dev/null
+++ b/playbooks/init/validate_hostnames.yml
@@ -0,0 +1,23 @@
+---
+- name: Validate node hostnames
+  hosts: oo_nodes_to_config
+  tasks:
+  - name: Query DNS for IP address of {{ openshift.common.hostname }}
+    shell:
+      getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
+    register: lookupip
+    changed_when: false
+    failed_when: false
+  - name: Warn user about bad openshift_hostname values
+    pause:
+      prompt:
+        The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+        does not resolve to an IP address owned by this host. Please set the
+        openshift_hostname variable to a hostname that resolves to an IP
+        address matching an interface on this host. Otherwise this host will
+        fail liveness checks for pods utilizing hostPorts. Press ENTER to
+        continue or CTRL-C to abort.
+      seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+    when:
+    - lookupip.stdout != '127.0.0.1'
+    - lookupip.stdout not in ansible_all_ipv4_addresses
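
Note: the pause above blocks on ENTER by default; setting
openshift_override_hostname_check=true in the inventory converts it into a
10-second non-interactive warning, per the seconds expression:

    [OSEv3:vars]
    openshift_override_hostname_check=true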
diff --git a/playbooks/init/vars/cluster_hosts.yml b/playbooks/init/vars/cluster_hosts.yml
new file mode 100644
index 000000000..e807ac004
--- /dev/null
+++ b/playbooks/init/vars/cluster_hosts.yml
@@ -0,0 +1,26 @@
+---
+g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+
+g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}"
+
+g_lb_hosts: "{{ groups.lb | default([]) }}"
+
+g_master_hosts: "{{ groups.masters | default([]) }}"
+
+g_new_master_hosts: "{{ groups.new_masters | default([]) }}"
+
+g_node_hosts: "{{ groups.nodes | default([]) }}"
+
+g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
+
+g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
+g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                 | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts)
+                 | union(g_new_node_hosts)| union(g_new_master_hosts)
+                 | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts)
+                 | default([]) }}"
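
Note: these g_* variables map the user-facing inventory groups onto the
internal oo_* groups built in evaluate_groups.yml. A minimal matching
inventory (hostnames illustrative):

    [etcd]
    master1.example.com

    [masters]
    master1.example.com

    [nodes]
    master1.example.com
    node1.example.com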
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
new file mode 100644
index 000000000..37a5284d5
--- /dev/null
+++ b/playbooks/init/version.yml
@@ -0,0 +1,21 @@
+---
+# NOTE: requires openshift_facts to have been run
+- name: Determine openshift_version to configure on first master
+  hosts: oo_first_master
+  roles:
+  - openshift_version
+
+# NOTE: We set this even on etcd hosts, as they may also later run as
+# masters, and we don't want to install the wrong version of docker and
+# have to downgrade later.
+- name: Set openshift_version for etcd, node, and master hosts
+  hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
+  vars:
+    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+  pre_tasks:
+  - set_fact:
+      openshift_pkg_version: -{{ openshift_version }}
+    when: openshift_pkg_version is not defined
+  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
+  roles:
+  - openshift_version
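
Note: the derived openshift_pkg_version carries a leading dash so it can be
appended directly to a package name when pinning RPMs. It can also be set
explicitly in the inventory (version illustrative):

    [OSEv3:vars]
    openshift_pkg_version=-3.7.0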