Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml                         |   4
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml                |  60
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml                        |   2
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml      |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check   |  17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/versions.sh         |   5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml    |   4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml  |  96
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins |   1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library        |   1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins |   1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml       |  50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml        |  87
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles          |   1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml    | 137
-rw-r--r--  playbooks/common/openshift-docker/config.yml                          |   9
l---------  playbooks/common/openshift-docker/filter_plugins                      |   1
l---------  playbooks/common/openshift-docker/lookup_plugins                      |   1
l---------  playbooks/common/openshift-docker/roles                               |   1
-rw-r--r--  playbooks/common/openshift-etcd/config.yml                            |   7
-rw-r--r--  playbooks/common/openshift-etcd/service.yml                           |   2
-rw-r--r--  playbooks/common/openshift-master/config.yml                          |  92
-rw-r--r--  playbooks/common/openshift-master/restart.yml                         | 151
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml                   |  39
-rw-r--r--  playbooks/common/openshift-master/restart_hosts_pacemaker.yml         |  25
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml                |  27
-rw-r--r--  playbooks/common/openshift-master/restart_services_pacemaker.yml      |  10
-rw-r--r--  playbooks/common/openshift-master/service.yml                         |   2
-rw-r--r--  playbooks/common/openshift-nfs/config.yml                             |   5
l---------  playbooks/common/openshift-nfs/filter_plugins                         |   1
l---------  playbooks/common/openshift-nfs/lookup_plugins                         |   1
l---------  playbooks/common/openshift-nfs/roles                                  |   1
-rw-r--r--  playbooks/common/openshift-nfs/service.yml                            |  18
-rw-r--r--  playbooks/common/openshift-node/config.yml                            |  67
-rw-r--r--  playbooks/common/openshift-node/service.yml                           |   2
35 files changed, 826 insertions, 104 deletions
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 482fa8441..11e5b68f6 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,8 +1,12 @@
---
- include: evaluate_groups.yml
+- include: ../openshift-docker/config.yml
+
- include: ../openshift-etcd/config.yml
+- include: ../openshift-nfs/config.yml
+
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 34da372a4..db7105ed5 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -1,23 +1,33 @@
---
- name: Populate config host groups
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- fail:
- msg: This playbook requires g_etcd_group to be set
- when: g_etcd_group is not defined
+ msg: This playbook requires g_etcd_hosts to be set
+ when: g_etcd_hosts is not defined
- fail:
- msg: This playbook requires g_masters_group to be set
- when: g_masters_group is not defined
+ msg: This playbook requires g_master_hosts to be set
+ when: g_master_hosts is not defined
- fail:
- msg: This playbook requires g_nodes_group or g_new_nodes_group to be set
- when: g_nodes_group is not defined and g_new_nodes_group is not defined
+ msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- fail:
- msg: This playbook requires g_lb_group to be set
- when: g_lb_group is not defined
+ msg: This playbook requires g_lb_hosts to be set
+ when: g_lb_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_nfs_hosts to be set
+ when: g_nfs_hosts is not defined
+
+ - fail:
+ msg: The nfs group must be limited to one host
+ when: g_nfs_hosts | default([]) | length > 1
- name: Evaluate oo_etcd_to_config
add_host:
@@ -25,7 +35,7 @@
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
+ with_items: "{{ g_etcd_hosts | default([]) }}"
- name: Evaluate oo_masters_to_config
add_host:
@@ -33,11 +43,11 @@
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
+ with_items: "{{ g_master_hosts | default([]) }}"
- # Use g_new_nodes_group if it exists otherwise g_nodes_group
+ # Use g_new_node_hosts if it exists, otherwise g_node_hosts
- set_fact:
- g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}"
+ g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([])) }}"
- name: Evaluate oo_nodes_to_config
add_host:
@@ -45,32 +55,32 @@
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_to_config] | default([])
+ with_items: "{{ g_node_hosts_to_config | default([]) }}"
- # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
- name: Evaluate oo_first_etcd
add_host:
- name: "{{ groups[g_etcd_group][0] }}"
+ name: "{{ g_etcd_hosts[0] }}"
groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+ when: g_etcd_hosts|length > 0
- name: Evaluate oo_first_master
add_host:
- name: "{{ groups[g_masters_group][0] }}"
+ name: "{{ g_master_hosts[0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+ when: g_master_hosts|length > 0
- name: Evaluate oo_lb_to_config
add_host:
@@ -78,4 +88,12 @@
groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_lb_group] | default([])
+ with_items: "{{ g_lb_hosts | default([]) }}"
+
+ - name: Evaluate oo_nfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_nfs_hosts | default([]) }}"
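
Note: this refactor replaces inventory group names (g_etcd_group, g_masters_group, ...)
with explicit host lists (g_etcd_hosts, g_master_hosts, ...). A minimal sketch of how a
calling playbook might now feed evaluate_groups.yml; the inventory group names used here
(etcd, masters, nodes, lb, nfs) are illustrative assumptions:

    - include: ../../common/openshift-cluster/evaluate_groups.yml
      vars:
        g_etcd_hosts: "{{ groups.etcd | default([]) }}"
        g_master_hosts: "{{ groups.masters | default([]) }}"
        g_node_hosts: "{{ groups.nodes | default([]) }}"
        g_lb_hosts: "{{ groups.lb | default([]) }}"
        g_nfs_hosts: "{{ groups.nfs | default([]) }}"
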
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
index e1778e41e..d2ba3fc7a 100644
--- a/playbooks/common/openshift-cluster/scaleup.yml
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -3,6 +3,4 @@
- include: ../openshift-node/config.yml
vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 190e2d862..1474bb3ca 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
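
Note: the rhel_subscribe role now applies to all enterprise deployment types, not just
"enterprise". Per the when clause above, subscription handling can still be skipped via
the rhel_skip_subscription cluster option or its inventory fallback, sketched here with
an illustrative value:

    # group_vars sketch (illustrative): rhsub_skip is the fallback variable
    # named in the when clause above
    rhsub_skip: 'yes'
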
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
index b5459f312..e5c958ebb 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -111,13 +111,16 @@ def print_validation_header():
overwhelming the user.
"""
print """\
-At least one port name does not validate. Valid port names:
+At least one port name is invalid and must be corrected before upgrading.
+Please update or remove any resources with invalid port names.
- * must be less that 16 chars
+ Valid port names must:
+
+ * be less than 16 characters
* have at least one letter
- * only a-z0-9-
- * do not start or end with -
- * Dashes may not be next to eachother ('--')
+ * contain only a-z0-9-
+ * not start or end with -
+ * not contain dashes next to each other ('--')
"""
@@ -142,9 +145,9 @@ def main():
# Where the magic happens
first_error = True
for kind, path in [
+ ('deploymentconfigs', ("spec", "template", "spec", "containers")),
('replicationcontrollers', ("spec", "template", "spec", "containers")),
- ('pods', ("spec", "containers")),
- ('deploymentconfigs', ("spec", "template", "spec", "containers"))]:
+ ('pods', ("spec", "containers"))]:
for item in list_items(kind):
namespace = item["metadata"]["namespace"]
item_name = item["metadata"]["name"]
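
Note: the check now walks containers in deploymentconfigs, replicationcontrollers, and
pods, in that order, so DC errors surface first. A port spec that satisfies every rule
in the updated validation header, with illustrative names:

    # fragment of a container spec (illustrative)
    ports:
    - name: web-http-1     # under 16 chars, contains letters, only a-z0-9-,
                           # no leading/trailing '-', no '--'
      containerPort: 8080
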
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
index c7c966b60..3a1a8ebb1 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -1,9 +1,8 @@
#!/bin/bash
-yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-
-yum_available=$(yum list available -q "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
echo "---"
echo "curr_version: ${yum_installed}"
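
Note: the script emits a small YAML document that the upgrade plays parse into
g_aos_versions via from_yaml. Illustrative output for a host with an update available
(version strings are examples only):

    ---
    curr_version: 3.1.0-1.el7
    avail_version: 3.1.1-1.el7
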
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 9f7e49b93..63c8ef756 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -11,7 +11,7 @@
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
- name: Upgrade master packages
- yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
- name: Restart master services
service: name="{{ openshift.common.service_type}}-master" state=restarted
@@ -21,7 +21,7 @@
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
- name: Upgrade node packages
- yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
- name: Restart node services
service: name="{{ openshift.common.service_type }}-node" state=restarted
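
Note: swapping the hard-coded yum module for "{{ ansible_pkg_mgr }}" lets the same task
run on both yum- and dnf-based hosts. The generic pattern, with a placeholder package
name:

    - name: Upgrade a package with the host's native package manager
      action: "{{ ansible_pkg_mgr }} name=example-package state=latest"
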
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 0309e8a77..8ec379109 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -12,6 +12,8 @@
- name: Evaluate additional groups for upgrade
hosts: localhost
+ connection: local
+ become: no
tasks:
- name: Evaluate etcd_hosts_to_backup
add_host:
@@ -27,6 +29,7 @@
hosts: oo_first_master
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
gather_facts: no
tasks:
# Pacemaker is currently the only supported upgrade path for multiple masters
@@ -43,8 +46,8 @@
- fail:
msg: >
openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a 3.1 upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<')
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
# If this script errors out ansible will show the default stdout/stderr
# which contains details for the user:
@@ -53,9 +56,11 @@
- name: Verify upgrade can proceed
hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
tasks:
- - name: Clean yum cache
- command: yum clean all
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
@@ -75,8 +80,8 @@
when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
- fail:
- msg: Atomic OpenShift 3.1 packages not found
- when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
- set_fact:
pre_upgrade_complete: True
@@ -87,6 +92,8 @@
##############################################################################
- name: Gate on pre-upgrade checks
hosts: localhost
+ connection: local
+ become: no
vars:
pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
tasks:
@@ -149,9 +156,7 @@
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- name: Install etcd (for etcdctl)
- yum:
- pkg: etcd
- state: latest
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- name: Generate etcd backup
command: >
@@ -171,6 +176,8 @@
##############################################################################
- name: Gate on etcd backup
hosts: localhost
+ connection: local
+ become: no
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
@@ -189,6 +196,8 @@
###############################################################################
- name: Create temp directory for syncing certs
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -222,17 +231,14 @@
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
- name: Upgrade to latest available kernel
- yum:
- pkg: kernel
- state: latest
+ action: "{{ ansible_pkg_mgr }} name=kernel state=latest"
- name: Upgrade master packages
- command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
- name: Ensure python-yaml present for config upgrade
- yum:
- pkg: PyYAML
- state: installed
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
- name: Upgrade master configuration
openshift_upgrade_config:
@@ -242,7 +248,31 @@
config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
- set_fact:
- master_certs_missing: True
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+ - set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+ - name: Check status of master certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items: openshift_master_certs
+ register: g_master_cert_stat_result
+
+ - set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -256,8 +286,8 @@
| oo_flatten | unique }}"
master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | difference([groups.oo_first_master.0]) }}"
+ | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+ | oo_filter_list(filter_attr='master_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_deployment_type: "{{ deployment_type }}"
roles:
@@ -339,6 +369,8 @@
- name: Delete temporary directory on localhost
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- file: name={{ g_master_mktemp.stdout }} state=absent
@@ -357,6 +389,8 @@
##############################################################################
- name: Gate on master update
hosts: localhost
+ connection: local
+ become: no
tasks:
- set_fact:
master_update_completed: "{{ hostvars
@@ -380,7 +414,7 @@
- openshift_facts
tasks:
- name: Upgrade node packages
- command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
- name: Restart node service
service: name="{{ openshift.common.service_type }}-node" state=restarted
@@ -388,6 +422,24 @@
- name: Ensure node service enabled
service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+ - name: Install Ceph storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+
+ - name: Install GlusterFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+
+ - name: Set sebooleans to allow gluster storage plugin access from containers
+ seboolean:
+ name: "{{ item }}"
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+
- set_fact:
node_update_complete: True
@@ -397,6 +449,8 @@
##############################################################################
- name: Gate on nodes update
hosts: localhost
+ connection: local
+ become: no
tasks:
- set_fact:
node_update_completed: "{{ hostvars
@@ -464,6 +518,8 @@
##############################################################################
- name: Gate on reconcile
hosts: localhost
+ connection: local
+ become: no
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
new file mode 100644
index 000000000..d8336fcae
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -0,0 +1,50 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1.z; existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
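
Note: registry_image and router_image are built by substituting the ${component} and
${version} placeholders in openshift.master.registry_url. Assuming an enterprise-style
registry_url of 'registry.access.redhat.com/openshift3/ose-${component}:${version}' and
a g_new_version of '3.1.1' (both illustrative), the vars expand to:

    registry_image: registry.access.redhat.com/openshift3/ose-docker-registry:v3.1.1
    router_image: registry.access.redhat.com/openshift3/ose-haproxy-router:v3.1.1
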
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
new file mode 100644
index 000000000..91780de09
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -0,0 +1,87 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
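
Note: g_new_version strips the release suffix from whichever version wins, preferring
avail_version when versions.sh reports one. With the illustrative versions.sh output
shown earlier (curr_version 3.1.0-1.el7, avail_version 3.1.1-1.el7), the set_fact above
yields:

    g_new_version: '3.1.1'
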
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..81dbba1e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,137 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.0'
+# to_version: '3.1'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml
new file mode 100644
index 000000000..092d5533c
--- /dev/null
+++ b/playbooks/common/openshift-docker/config.yml
@@ -0,0 +1,9 @@
+- name: Configure docker hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ vars:
+ docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+ docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
+ docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+ roles:
+ - openshift_facts
+ - openshift_docker
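
Note: each registry option arrives as a comma-separated string from the oo_option
lookup and is turned into a list by the oo_split filter. An illustrative expansion:

    # cluster option (illustrative):
    #   docker_additional_registries: "registry.example.com:5000,mirror.example.com"
    # after oo_split, the role receives:
    docker_additional_registries:
    - registry.example.com:5000
    - mirror.example.com
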
diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-docker/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-docker/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-docker/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-docker/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-docker/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 3d7a884e8..93eb157cb 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -14,7 +14,8 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- role: etcd
- local_facts: {}
+ local_facts:
+ etcd_image: "{{ osm_etcd_image | default(None) }}"
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
@@ -33,7 +34,7 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -116,7 +117,7 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ g_etcd_mktemp.stdout }} state=absent
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index 0bf69b22f..fd2bc24ae 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -1,6 +1,8 @@
---
- name: Populate g_service_masters host group if needed
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index becd68dbe..6f86703d6 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -43,6 +43,7 @@
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
@@ -51,6 +52,7 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
- name: Check status of external etcd certificates
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -70,7 +72,7 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -84,6 +86,7 @@
etcd_generated_certs_dir: /etc/etcd/generated_certs
etcd_needing_client_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
+ | default([])
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
@@ -161,6 +164,11 @@
| list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ - set_fact:
+ openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['nodes'])
+ | oo_nodes_with_label('region', 'infra')
+ | oo_collect('inventory_hostname') }}"
+ when: openshift_infra_nodes is not defined
- name: Configure master certificates
hosts: oo_first_master
@@ -207,7 +215,7 @@
- name: Compute haproxy_backend_servers
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- set_fact:
@@ -217,6 +225,7 @@
hosts: oo_lb_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
haproxy_frontends:
- name: atomic-openshift-api
mode: tcp
@@ -232,42 +241,41 @@
balance: source
servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
roles:
+ - role: openshift_facts
- role: haproxy
when: groups.oo_masters_to_config | length > 1
-- name: Generate master session keys
+- name: Check for cached session secrets
hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+
+- name: Generate master session secrets
+ hosts: oo_first_master
+ vars:
+ g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+ g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ roles:
+ - role: openshift_facts
tasks:
- - fail:
- msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
- when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
- - fail:
- msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
- when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
- - name: Install OpenSSL package
- action: "{{ansible_pkg_mgr}} pkg=openssl state=present"
- - name: Generate session authentication key
- command: /usr/bin/openssl rand -base64 24
- register: session_auth_output
- with_sequence: count=1
- when: openshift_master_session_auth_secrets is undefined
- - name: Generate session encryption key
- command: /usr/bin/openssl rand -base64 24
- register: session_encryption_output
- with_sequence: count=1
- when: openshift_master_session_encryption_secrets is undefined
- - set_fact:
- session_auth_secret: "{{ openshift_master_session_auth_secrets
- | default(session_auth_output.results
- | oo_collect(attribute='stdout')
- | list) }}"
- session_encryption_secret: "{{ openshift_master_session_encryption_secrets
- | default(session_encryption_output.results
- | oo_collect(attribute='stdout')
- | list) }}"
+ - openshift_facts:
+ role: master
+ local_facts:
+ session_auth_secrets: "{{ g_session_auth_secrets }}"
+ session_encryption_secrets: "{{ g_session_encryption_secrets }}"
+ when: not g_session_secrets_present | bool
- name: Parse named certificates
hosts: localhost
+ connection: local
+ become: no
vars:
internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
@@ -313,13 +321,14 @@
- name: Configure master instances
hosts: oo_masters_to_config
+ any_errors_fatal: true
serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
- openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
- openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
+ openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
+ openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -336,6 +345,8 @@
- role: nickhammond.logrotate
- role: fluentd_master
when: openshift.common.use_fluentd | bool
+ - role: nuage_master
+ when: openshift.common.use_nuage | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -349,7 +360,8 @@
roles:
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- - openshift_examples
+ - role: openshift_examples
+ when: openshift.common.install_examples | bool
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
- role: openshift_manageiq
@@ -361,7 +373,7 @@
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
roles:
- role: cockpit
- when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
- name: Configure flannel
@@ -382,7 +394,7 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ g_master_mktemp.stdout }} state=absent
@@ -399,7 +411,15 @@
- name: Create services
hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ groups.oo_nfs_to_config | length > 0 }}"
+ pre_tasks:
+ - set_fact:
+ nfs_host: "{{ groups.oo_nfs_to_config.0 }}"
+ registry_volume_path: "{{ hostvars[groups.oo_nfs_to_config.0].openshift.nfs.exports_dir + '/' + hostvars[groups.oo_nfs_to_config.0].openshift.nfs.registry_volume }}"
+ when: attach_registry_volume | bool
roles:
- role: openshift_router
when: openshift.master.infra_nodes is defined
- #- role: openshift_registry
+ - role: openshift_registry
+ when: openshift.master.infra_nodes is defined and attach_registry_volume | bool
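
Note: session secrets are now generated once on the first master (via the
oo_generate_secret filter) and cached as openshift.master facts, so reruns reuse the
same values. To pin them from inventory instead, a sketch with placeholder values,
never real secrets:

    openshift_master_session_auth_secrets: ['NOT-A-REAL-SECRET-REPLACE-ME']
    openshift_master_session_encryption_secrets: ['NOT-A-REAL-SECRET-REPLACE-ME']
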
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
new file mode 100644
index 000000000..02449e40d
--- /dev/null
+++ b/playbooks/common/openshift-master/restart.yml
@@ -0,0 +1,151 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - fail:
+ msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+ when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Create a temp file on localhost, then check each system that will be
+# rebooted for that file. If it exists, ansible is running from a machine
+# that is about to be rebooted, and the user must be warned before continuing.
+- name: Create temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - local_action: command mktemp
+ register: mktemp
+ changed_when: false
+
+- name: Check if temp file exists on any masters
+ hosts: oo_masters_to_config
+ tasks:
+ - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+ register: exists
+ changed_when: false
+
+- name: Cleanup temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+ changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+ hosts: oo_masters_to_config
+ tasks:
+ - pause:
+ prompt: >
+ Warning: Running playbook from a host that will be restarted!
+ Press CTRL+C and A to abort playbook execution. You may
+ continue by pressing ENTER but the playbook will stop
+ executing after this system has been restarted and services
+ must be verified manually. To only restart services, set
+ openshift_rolling_restart_mode=services in host
+ inventory and relaunch the playbook.
+ when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ - set_fact:
+ current_host: "{{ exists.stat.exists }}"
+ when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ tasks:
+ - name: Check master service status
+ command: >
+ systemctl is-active {{ openshift.common.service_type }}-master
+ register: active_check_output
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+ failed_when: false
+ changed_when: false
+ - set_fact:
+ is_active: "{{ active_check_output.stdout == 'active' }}"
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+
+- name: Evaluate master groups
+ hosts: localhost
+ become: no
+ tasks:
+ - fail:
+ msg: >
+ Did not receive active status from any masters. Please verify pacemaker cluster.
+ when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('is_active')
+ | list) }}"
+ - name: Evaluate oo_active_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_active_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['is_active'] | default(false)) | bool
+ - name: Evaluate oo_current_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_current_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Validate pacemaker cluster
+ hosts: oo_active_masters
+ tasks:
+ - name: Retrieve pcs status
+ command: pcs status
+ register: pcs_status_output
+ changed_when: false
+ - fail:
+ msg: >
+ Pacemaker cluster validation failed. One or more nodes are not online.
+ when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
+- name: Restart masters
+ hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+ hosts: oo_active_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+ hosts: oo_current_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
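
Note: the restart strategy is selected by openshift_rolling_restart_mode, which must be
'services' (the default) or 'system' per the validation play above. An inventory sketch:

    # restart full hosts, one master at a time
    openshift_rolling_restart_mode: system
    # or restart only the master services (default behavior)
    # openshift_rolling_restart_mode: services
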
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
new file mode 100644
index 000000000..ff206f5a2
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,39 @@
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port=22
+ when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+ command: pcs status
+ register: pcs_status_output
+ until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+ retries: 15
+ delay: 2
+ changed_when: false
+ when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+ msg: >
+ Pacemaker cluster validation failed, {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+ command: >
+ pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: restarted
+ when: not openshift_master_ha | bool
+- name: Restart master API
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: restarted
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: restarted
+ # Ignore errors since it is possible that type != simple for
+ # pre-3.1.1 installations.
+ ignore_errors: true
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+ command: pcs resource restart master
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 27e1e66f9..f60c5a2b5 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -2,6 +2,8 @@
- name: Populate g_service_masters host group if needed
hosts: localhost
gather_facts: no
+ connection: local
+ become: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
when: new_cluster_state is not defined
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
new file mode 100644
index 000000000..e3f5c17ca
--- /dev/null
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -0,0 +1,5 @@
+---
+- name: Configure nfs hosts
+ hosts: oo_nfs_to_config
+ roles:
+ - role: openshift_storage_nfs
diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-nfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-nfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/common/openshift-nfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-nfs/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
new file mode 100644
index 000000000..20c8ca248
--- /dev/null
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_nfs host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_nfs
+ add_host: name={{ item }} groups=g_service_nfs
+ with_items: oo_host_group_exp | default([])
+
+- name: Change state on nfs instance(s)
+ hosts: g_service_nfs
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 2b6171cb3..81ec9ab6d 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -16,6 +16,7 @@
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ use_flannel: "{{ openshift_use_flannel | default(None) }}"
- role: node
local_facts:
labels: "{{ openshift_node_labels | default(None) }}"
@@ -58,7 +59,7 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -153,19 +154,15 @@
validate_checksum: yes
with_items: nodes_needing_certs
-- name: Configure node instances
+- name: Deploy node certificates
hosts: oo_nodes_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- pre_tasks:
+ tasks:
- name: Ensure certificate directory exists
file:
path: "{{ node_cert_dir }}"
state: directory
-
# TODO: notify restart node
# possibly test service started time against certificate/config file
# timestamps in node to trigger notify
@@ -174,10 +171,49 @@
src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
dest: "{{ node_cert_dir }}"
when: certs_missing
+
+- name: Evaluate node groups
+ hosts: localhost
+ become: no
+ tasks:
+ - name: Evaluate oo_containerized_master_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: oo_containerized_master_nodes
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+ when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+
+- name: Configure node instances
+ hosts: oo_containerized_master_nodes
+ serial: 1
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
roles:
- openshift_node
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ roles:
+ - openshift_node
+
+- name: Additional node config
+ hosts: oo_nodes_to_config
+ vars:
+ # TODO: Prefix flannel role variables.
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ roles:
- role: flannel
when: openshift.common.use_flannel | bool
+ - role: nuage_node
+ when: openshift.common.use_nuage | bool
- role: nickhammond.logrotate
- role: fluentd_node
when: openshift.common.use_fluentd | bool
@@ -189,7 +225,7 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ mktemp.stdout }} state=absent
@@ -211,6 +247,19 @@
| oo_collect('openshift.common.hostname') }}"
openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
-
+ # Necessary because, on a host that is both a node and a master, the
+ # master is restarted after the node restarts docker, and it can take up
+ # to 60 seconds for systemd to start the master again
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
roles:
- openshift_manage_node
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 5cf83e186..0f07add2a 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -1,6 +1,8 @@
---
- name: Populate g_service_nodes host group if needed
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"