From e6c159afb4ba39a7266c750d43d6a5e911cc8f21 Mon Sep 17 00:00:00 2001
From: Michael Gugino
Date: Mon, 18 Dec 2017 16:13:36 -0500
Subject: Remove openshift.common.{is_atomic|is_containerized}

We set these variables using facts in init, so there is no need to
duplicate the logic all around the codebase.
---
 playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index e8c0f361a..a90082760 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -43,7 +43,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
-- cgit v1.2.3

From eacc12897ca86a255f89b8a4537ce2b7004cf319 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Fri, 5 Jan 2018 12:44:56 -0500
Subject: Migrate to import_role for static role inclusion

In Ansible 2.2, the include_role directive came into existence as a
Tech Preview.  It is still a Tech Preview through Ansible 2.4 (and in
the current devel branch), but with a notable change: the default
behavior switched from static: true to static: false because that
functionality moved to the newly introduced import_role directive (in
order to stay consistent with include* being dynamic in nature and
import* being static in nature).

The dynamic include is considerably more memory intensive, as it
dynamically creates a role import for every host in the inventory.
(Also worth noting, at the time of this writing there is an object
allocation inefficiency in the dynamic include that can, in certain
situations, amplify this effect considerably.)

This change is meant to mitigate the pressure on memory for the Ansible
control host.  We need to evaluate where dynamic role inclusion makes
sense and revert back to it if and where it does.
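For reference, a minimal sketch (not part of the patch series) of the two
inclusion styles the message above contrasts; the role name and tasks file
come from the diff below, while the surrounding plays are illustrative only:

    # Static inclusion: resolved once at playbook parse time, so the role's
    # tasks are loaded a single time regardless of inventory size.
    - name: build upgrade scale groups (static)
      hosts: localhost
      tasks:
      - import_role:
          name: openshift_aws
          tasks_from: upgrade_node_group.yml

    # Dynamic inclusion: expanded at run time, per targeted host, which is
    # what drives the memory pressure described above on large inventories.
    - name: build upgrade scale groups (dynamic)
      hosts: localhost
      tasks:
      - include_role:
          name: openshift_aws
          tasks_from: upgrade_node_group.yml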
---
 playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index a90082760..6d59bfd0b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
   hosts: localhost
   tasks:
   - name: build upgrade scale groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: upgrade_node_group.yml
@@ -61,6 +61,6 @@
   hosts: localhost
   tasks:
   - name: clean up scale group
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: remove_scale_group.yml
-- cgit v1.2.3

From 0841917f05cfad2701164edbb271167c277d3300 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Thu, 10 Aug 2017 09:25:36 -0400
Subject: Add the ability to specify a timeout for node drain operations

---
 playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
     delay: 5
     failed_when:
     - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

 # Alright, let's clean up!
 - name: clean up the old scale group
-- cgit v1.2.3
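As a usage sketch (the variable name comes from the diff above; the file
location is only an assumption), the drain timeout is controlled entirely by
openshift_upgrade_nodes_drain_timeout:

    # group_vars/OSEv3.yml (illustrative path): cap each node drain at 600s.
    # With the expressions in the hunk above, a non-zero timeout sets
    # retries to 0 and keeps a timed-out drain from failing the play; the
    # default of 0 means "no timeout": retry the drain once, then fail.
    openshift_upgrade_nodes_drain_timeout: 600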