Diffstat (limited to 'playbooks/common/openshift-cluster/upgrades')
 playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 6 +++---
 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml         | 5 +++--
 2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 75ae92716..22d56f6d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -111,9 +111,9 @@
     origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
     ent_reconcile_bindings: true
     openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-    # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
-    # it will be updated when we perform node upgrade.
-    docker_protect_installed_version: True
+    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+    # restart.
+    skip_docker_role: True
   tasks:
   - name: Verifying the correct commandline tools are available
     shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
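For context on the new flag: a play-level variable like skip_docker_role is typically consumed inside the role itself via a when guard on its tasks, so that setting it in vars makes the role a no-op. A minimal sketch of that pattern, assuming a hypothetical roles/docker/tasks/main.yml (the actual openshift-ansible docker role is not part of this diff):

# roles/docker/tasks/main.yml -- illustrative sketch only, not the real role.
# With every task gated on the flag, setting skip_docker_role: True in a
# play's vars skips the role entirely and avoids touching a running docker.
- name: Install docker
  package:
    name: docker
    state: present
  when: not (skip_docker_role | default(False) | bool)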
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a54349220..917c95e29 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,10 +7,11 @@
   any_errors_fatal: true
   roles:
   - openshift_facts
+  - docker
   handlers:
   - include: ../../../../roles/openshift_node/handlers/main.yml
     static: yes
-  tasks:
+  pre_tasks:
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -37,7 +38,7 @@
       {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
-
+  tasks:
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
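The reshuffle in upgrade_nodes.yml leans on Ansible's fixed play ordering: pre_tasks run first, then roles, then tasks. Moving evacuation into pre_tasks therefore guarantees the node is drained before the docker role runs, and the re-added tasks section (the conditional docker/upgrade.yml include) only runs after the role completes. A minimal standalone sketch of that ordering (a hypothetical play, not part of this commit, which assumes a docker role is available):

- hosts: oo_nodes_to_upgrade
  pre_tasks:
  - debug:
      msg: "1. pre_tasks run first (here: evacuate the node)"
  roles:
  - docker   # 2. roles run after pre_tasks (assumes a docker role exists)
  tasks:
  - debug:
      msg: "3. tasks run last (here: include docker/upgrade.yml)"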