author    Devan Goodwin <dgoodwin@redhat.com>  2016-07-15 15:50:31 -0300
committer Devan Goodwin <dgoodwin@redhat.com>  2016-07-25 12:16:16 -0300
commit    31707737a5473bb5cf6545b6675b192a64a913c7 (patch)
tree      0164ca7dfec4e4012b86ad1824887562191e9c7a /playbooks/common/openshift-cluster/upgrades/upgrade.yml
parent    d0c75ba21ba84a9df4a6cd12f02ce335ea4f9427 (diff)
Introduce 1.3/3.3 upgrade path.
Refactored the 3.2 upgrade common files out to a path that does not indicate they are strictly for 3.2. The 3.3 upgrade then becomes a relatively small copy of the byo entry point, all calling the same code as the 3.2 upgrade. Thus far there are no known 3.3-specific upgrade tasks; in the future we will likely want to allow hooks out to version-specific pre/upgrade/post tasks.

Also fixes a bug where the handlers were not restarting node/openvswitch containers during upgrades, due to a behavior change in Ansible 2+.
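For context, the handler fix referenced above corresponds to the `static: yes` flag on the handler includes in the diff below: in Ansible 2+, a bare `include:` under `handlers:` is treated as a dynamic include, and `notify` directives may fail to reach handlers defined inside the included file. A minimal sketch of the pattern follows; the hosts group, file path, handler name, and config paths are hypothetical, for illustration only:

# Hypothetical standalone play illustrating a statically included handler file.
# "restart node" is assumed to be defined inside handlers/node_handlers.yml.
- name: Example play with statically included handlers
  hosts: nodes
  handlers:
  - include: handlers/node_handlers.yml
    static: yes   # without this, Ansible 2+ may not resolve the notify below
  tasks:
  - name: Install updated node configuration
    copy:
      src: node-config.yaml
      dest: /etc/origin/node/node-config.yaml
    notify:
    - restart node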
Diffstat (limited to 'playbooks/common/openshift-cluster/upgrades/upgrade.yml')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade.yml  169
1 file changed, 169 insertions, 0 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
new file mode 100644
index 000000000..e7638982c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -0,0 +1,169 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+  hosts: oo_masters_to_config
+  handlers:
+  - include: ../../../../roles/openshift_master/handlers/main.yml
+    static: yes
+  roles:
+  - openshift_facts
+  tasks:
+  - include: rpm_upgrade.yml component=master
+    when: not openshift.common.is_containerized | bool
+
+  - include_vars: ../../../../roles/openshift_master/vars/main.yml
+
+  - name: Update systemd units
+    include: ../../../../roles/openshift_master/tasks/systemd_units.yml
+
+#  - name: Upgrade master configuration
+#    openshift_upgrade_config:
+#      from_version: '3.1'
+#      to_version: '3.2'
+#      role: master
+#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+
+# Here we handle all tasks that might require a node evacuation (upgrading docker and the node service).
+- name: Perform upgrades that may require node evacuation
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
+  serial: 1
+  any_errors_fatal: true
+  roles:
+  - openshift_facts
+  handlers:
+  - include: ../../../../roles/openshift_node/handlers/main.yml
+    static: yes
+  tasks:
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Mark unschedulable if host is a node
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  # Only check if docker upgrade is required if docker_upgrade is not
+  # already set to False.
+  - include: docker/upgrade_check.yml
+    when: (docker_upgrade is not defined or docker_upgrade | bool) and not openshift.common.is_atomic | bool
+
+  - include: docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  - include: rpm_upgrade.yml
+    vars:
+      component: "node"
+      openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+  - include: containerized_node_upgrade.yml
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+  hosts: oo_masters_to_config
+  roles:
+  - { role: openshift_cli }
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  tasks:
+  - name: Verify the correct command-line tools are available
+    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}
+    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --additive-only=true --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:authenticated:oauth
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - name: Reconcile Security Context Constraints
+    command: >
+      {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm --additive-only=true
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                             | oo_select_keys(groups.oo_masters_to_config)
+                             | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0