-rw-r--r--  .tito/packages/openshift-ansible                                                  |   2
-rw-r--r--  filter_plugins/oo_filters.py                                                      |  32
-rw-r--r--  inventory/byo/hosts.origin.example                                                |   8
-rw-r--r--  inventory/byo/hosts.ose.example                                                   |   7
-rw-r--r--  openshift-ansible.spec                                                            |  92
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml                |  10
l---------  playbooks/byo/openshift-cluster/upgrades/v3_3/roles                               |   1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml                         | 138
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml           | 100
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml                   | 102
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml                           |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml             |  22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml        |   7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml              |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml                              |  50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml       |  40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/post.yml) |   0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre.yml                               | 311
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml                   |   6
l---------  playbooks/common/openshift-cluster/upgrades/pre/roles                             |   1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml  |  31
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml |  23
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml         |  37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml          |  13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml        |  45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/upgrade.yml) | 190
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml                     |  75
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml                                |  11
-rw-r--r--  roles/docker/defaults/main.yml                                                    |   1
-rw-r--r--  roles/docker/tasks/main.yml                                                       |   8
-rw-r--r--  roles/nuage_common/defaults/main.yaml                                             |   3
-rw-r--r--  roles/nuage_master/meta/main.yml                                                  |  12
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2                           |   9
-rw-r--r--  roles/nuage_master/vars/main.yaml                                                 |   1
-rw-r--r--  roles/openshift_cli/meta/main.yml                                                 |   1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                                  |   8
-rw-r--r--  roles/openshift_loadbalancer/README.md                                            |  51
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml                                        |   4
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml                                       |   4
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2                             |   8
-rw-r--r--  roles/openshift_loadbalancer_facts/README.md                                      |  34
-rw-r--r--  roles/openshift_loadbalancer_facts/meta/main.yml                                  |  13
-rw-r--r--  roles/openshift_loadbalancer_facts/tasks/main.yml                                 |  30
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2                                |   3
-rw-r--r--  roles/openshift_version/meta/main.yml                                             |   2
-rw-r--r--  utils/src/ooinstall/cli_installer.py                                              |   4
46 files changed, 973 insertions, 581 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index eb4b07935..c913ecda8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.4.2-1 ./
+3.4.3-1 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 39e6b0a0b..5358a244e 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,7 @@ Custom filters for use in openshift-ansible
from ansible import errors
from collections import Mapping
+from distutils.util import strtobool
from distutils.version import LooseVersion
from operator import itemgetter
import OpenSSL.crypto
@@ -858,6 +859,35 @@ class FilterModule(object):
# netloc wasn't parsed, assume url was missing scheme and path
return parse_result.path
+ @staticmethod
+ def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ loadbalancer_frontends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(api_port)],
+ 'default_backend': 'atomic-openshift-api'}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_frontends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(nuage_rest_port)],
+ 'default_backend': 'nuage-monitor'})
+ return loadbalancer_frontends
+
+ @staticmethod
+ def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ loadbalancer_backends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_backends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
+ return loadbalancer_backends
def filters(self):
""" returns a mapping of filters to methods """
@@ -891,4 +921,6 @@ class FilterModule(object):
"oo_merge_dicts": self.oo_merge_dicts,
"oo_hostname_from_url": self.oo_hostname_from_url,
"oo_merge_hostvars": self.oo_merge_hostvars,
+ "oo_openshift_loadbalancer_frontends": self.oo_openshift_loadbalancer_frontends,
+ "oo_openshift_loadbalancer_backends": self.oo_openshift_loadbalancer_backends
}
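The two new filters are consumed by the refactored load balancer roles listed in the diffstat (roles/openshift_loadbalancer_facts and roles/openshift_loadbalancer), whose task files are not shown in this hunk. A minimal sketch of how a play could call them, assuming the API port, the oo_masters group, and the Nuage variables shown here are already defined in the inventory or gathered facts; the exact wiring inside the role may differ:

- name: Compute haproxy frontends/backends via the new filters (illustrative sketch)
  hosts: oo_lb_to_config
  tasks:
    - set_fact:
        # The API port is piped in as the first filter argument; the masters' hostvars supply the backend servers.
        lb_frontends: "{{ openshift_master_api_port | default(8443) | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']), openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none)) }}"
        lb_backends: "{{ openshift_master_api_port | default(8443) | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']), openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none)) }}"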
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 3b43b5d0a..7febefe95 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -95,7 +95,6 @@ openshift_release=v1.2
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -441,6 +440,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 19519da50..2645d4510 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -440,6 +440,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
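When openshift_master_ingress_ip_network_cidr is set, the master configuration picks it up under networkConfig. Roughly the fragment that ends up in /etc/origin/master/master-config.yaml (illustrative; surrounding keys omitted):

networkConfig:
  ingressIPNetworkCIDR: 172.46.0.0/16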
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index d31447d7a..a8f03d17d 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.4.2
+Version: 3.4.3
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -249,6 +249,96 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Oct 04 2016 Scott Dodson <sdodson@redhat.com> 3.4.3-1
+- Check if openshift_master_ingress_ip_network_cidr is defined
+ (Mathias.Merscher@dg-i.net)
+- allow networkConfig.ingressIPNetworkCIDRs to be configured
+ (Mathias.Merscher@dg-i.net)
+- Filterize haproxy frontends/backends and add method for providing additional
+ frontends/backends. (abutcher@redhat.com)
+- a-o-i: Force option should allow reinstall (smunilla@redhat.com)
+- a-o-i: Fix openshift_node_labels (smunilla@redhat.com)
+- Enable registry support for image pruning (andrew@andrewklau.com)
+- Default openshift_hosted_{logging,metrics}_deploy to false.
+ (abutcher@redhat.com)
+- README_CONTAINERIZED_INSTALLATION: fixed link markdown
+ (jakub.kramarz@freshmail.pl)
+- README_AWS: makes links consistent and working again
+ (jakub.kramarz@freshmail.pl)
+- a-o-i: Allow better setting of host level variables (smunilla@redhat.com)
+- Further secure registry improvements (abutcher@redhat.com)
+- Delegate handlers to first master (smunilla@redhat.com)
+- Secure registry improvements. (abutcher@redhat.com)
+- Install Registry by Default (smunilla@redhat.com)
+- Update play names for consistency. (abutcher@redhat.com)
+- Addressed review comments (vishal.patil@nuagenetworks.net)
+- Configure ops cluster storage to match normal cluster storage
+ (sdodson@redhat.com)
+- Fix bug with service signer cert on upgrade. (dgoodwin@redhat.com)
+- Add messages to let the user know if some plays were skipped, but it's ok.
+ Also, remove the final 'press a key to continue' prompt.
+ (tbielawa@redhat.com)
+- Set named certificate destinations as basenames of provided paths.
+ (abutcher@redhat.com)
+- 'fix' unittests by removing the users ability to specify an ansible config
+ (tbielawa@redhat.com)
+- Copy and paste more methods (tbielawa@redhat.com)
+- Silence/dot-print more actions in the callback (tbielawa@redhat.com)
+- Fix conflicts in spec file (tbielawa@redhat.com)
+- Use pre_upgrade tag instead of a dry run variable. (dgoodwin@redhat.com)
+- Move etcd backup from pre-upgrade to upgrade itself. (dgoodwin@redhat.com)
+- Allow a couple retries when unscheduling/rescheduling nodes in upgrade.
+ (dgoodwin@redhat.com)
+- Skip the docker role in early upgrade stages. (dgoodwin@redhat.com)
+- Allow filtering nodes to upgrade by label. (dgoodwin@redhat.com)
+- Allow customizing node upgrade serial value. (dgoodwin@redhat.com)
+- Split upgrade for control plane/nodes. (dgoodwin@redhat.com)
+- Set the DomainName or DomainID in the OpenStack cloud provider
+ (lhuard@amadeus.com)
+- Use ansible.module_utils._text.to_text instead of
+ ansible.utils.unicode.to_unicode. (abutcher@redhat.com)
+- Suppress more warnings. (abutcher@redhat.com)
+- Add gitHTTPProxy and gitHTTPSProxy to advanced config json option
+ (sdodson@redhat.com)
+- Don't set IMAGE_PREFIX if openshift_cockpit_deployer_prefix is empty
+ (Robert.Bohne@ConSol.de)
+- Update spec file to install manpage (tbielawa@redhat.com)
+- Verify masters are upgraded before proceeding with node only upgrade.
+ (dgoodwin@redhat.com)
+- Attempt to tease apart pre upgrade for masters/nodes. (dgoodwin@redhat.com)
+- Split upgrade entry points into control plane/node. (dgoodwin@redhat.com)
+- Reunite upgrade reconciliation gating with the play it gates on.
+ (dgoodwin@redhat.com)
+- Drop atomic-enterprise as a valid deployment type in upgrade.
+ (dgoodwin@redhat.com)
+- Stop guarding against pacemaker in upgrade, no longer necessary.
+ (dgoodwin@redhat.com)
+- Support openshift_upgrade_dry_run=true for pre-upgrade checks only.
+ (dgoodwin@redhat.com)
+- Make rhel_subscribe role default to OpenShift Container Platform 3.3
+ (lhuard@amadeus.com)
+- Addresses most comments from @adellape (tbielawa@redhat.com)
+- Changes for Nuage HA (vishal.patil@nuagenetworks.net)
+- Fix deployer template for enterprise (sdodson@redhat.com)
+- Add a manpage for atomic-openshift-installer (tbielawa@redhat.com)
+- Remove the DNS VM on OpenStack (lhuard@amadeus.com)
+- tweak logic (jdetiber@redhat.com)
+- test fix for systemd changes (sdodson@redhat.com)
+- Set default_subdomain properly for logging (sdodson@redhat.com)
+- Adjust wait for loops (sdodson@redhat.com)
+- Add storage for logging (sdodson@redhat.com)
+- Fix some bugs in OpenShift Hosted Logging role (contact@stephane-klein.info)
+- Add some sample inventory stuff, will update this later (sdodson@redhat.com)
+- Label all nodes for fluentd (sdodson@redhat.com)
+- Rename openshift_hosted_logging_image_{prefix,version} to match metrics
+ (sdodson@redhat.com)
+- Fix deployer template for enterprise (sdodson@redhat.com)
+- Add logging to install playbooks (sdodson@redhat.com)
+- Fix OpenStack cloud provider (lhuard@amadeus.com)
+- Add rhaos-3.4-rhel-7 releaser to tito (sdodson@redhat.com)
+- Fix the nodeName of the OpenShift nodes on OpenStack (lhuard@amadeus.com)
+- Fix GCE Launch (brad@nolab.org)
+
* Mon Sep 26 2016 Scott Dodson <sdodson@redhat.com> 3.4.2-1
- Add an issue template (sdodson@redhat.com)
- Add openshift_hosted_router_name (andrew@andrewklau.com)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 1fa32570c..9be6becc1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
@@ -19,7 +19,7 @@
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
@@ -27,13 +27,13 @@
command: >
{{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
{{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
@@ -43,5 +43,5 @@
{{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index e740b12c0..7a3829283 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -1,67 +1,101 @@
---
-- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
-
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+# Configure the upgrade target for the common upgrade tasks:
- hosts: l_oo_all_hosts
- gather_facts: no
+ tags:
+ - pre_upgrade
tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+# Pre-upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
-- include: ../../../../common/openshift-cluster/upgrades/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
vars:
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
vars:
- openshift_deployment_type: "{{ deployment_type }}"
master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
node_config_hook: "v3_3/node_config_upgrade.yml"
+
- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..d6af71827
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,100 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..e2a33cc00
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,102 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 04dde632b..6d83d2527 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -11,3 +11,5 @@
hostname: "{{ openshift_hostname | default(None) }}"
- set_fact:
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ - set_fact:
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
new file mode 100644
index 000000000..6e953be69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -0,0 +1,22 @@
+---
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Remove unused Docker images for Docker 1.10+ migration
+ shell: "docker rmi `docker images -aq`"
+ # Will fail on images still in use:
+ failed_when: false
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index e8a20aa2b..78f6c46f3 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -9,6 +9,7 @@
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
register: local_cert_sync_tmpdir
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Create service signer certificate
hosts: oo_first_master
@@ -17,6 +18,7 @@
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
register: remote_cert_create_tmpdir
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Create service signer certificate
command: >
@@ -27,6 +29,7 @@
--serial=service-signer.serial.txt
args:
chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Retrieve service signer certificate
fetch:
@@ -38,12 +41,14 @@
with_items:
- "service-signer.crt"
- "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Delete remote temp directory
file:
name: "{{ remote_cert_create_tmpdir.stdout }}"
state: absent
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Deploy service signer certificate
hosts: oo_masters_to_config
@@ -55,6 +60,7 @@
with_items:
- "service-signer.crt"
- "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Delete local temp directory
hosts: localhost
@@ -67,3 +73,4 @@
name: "{{ local_cert_sync_tmpdir.stdout }}"
state: absent
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 8002af4fc..fc26d029e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,7 +1,7 @@
---
# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_version to True if so.
+# variables, the available packages, and sets l_docker_upgrade to True if so.
- set_fact:
docker_upgrade: True
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
new file mode 100644
index 000000000..f3b3abe0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -0,0 +1,50 @@
+---
+- include: ../verify_ansible_version.yml
+
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+
+- include: ../initialize_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
new file mode 100644
index 000000000..4e375ac26
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -0,0 +1,40 @@
+---
+- name: Filter list of nodes to be upgraded if necessary
+ hosts: oo_first_master
+ tasks:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ command: >
+ {{ openshift.common.client_binary }}
+ get nodes
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --selector={{ openshift_upgrade_nodes_label }}
+ -o jsonpath='{.items[*].metadata.name}'
+ register: matching_nodes
+ changed_when: false
+ when: openshift_upgrade_nodes_label is defined
+
+ - set_fact:
+ nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+ when: openshift_upgrade_nodes_label is defined
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+ changed_when: false
+
+ # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
+ # present, otherwise hit all nodes:
+ - name: Evaluate oo_nodes_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups['temp_nodes_to_upgrade'] | default(groups['oo_nodes_to_config']) }}"
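As the changelog notes, upgrades can now be limited to nodes carrying a given label. A sketch of the inventory variables involved: openshift_upgrade_nodes_label is read directly by the play above, while the name for the customizable serial value is an assumption here, not confirmed by this diff:

# Upgrade only the labelled nodes, one host at a time
openshift_upgrade_nodes_label: "region=infra"
openshift_upgrade_nodes_serial: 1   # assumed name for the serial value mentioned in the changelog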
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
deleted file mode 100644
index 42a24eaf8..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ /dev/null
@@ -1,311 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-
-- include: ../initialize_facts.yml
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_config
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- vars:
- g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- This upgrade does not support Pacemaker:
- https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
- when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
-
-- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- # Docker role (a dependency) should be told not to do anything to installed version
- # of docker, we handle this separately during upgrade. (the inventory may have a
- # docker_version defined, we don't want to actually do it until later)
- docker_protect_installed_version: True
-
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
-
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
-
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
- # Additional checks for Atomic hosts:
-
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
- - fail:
- msg: This playbook requires access to Docker 1.10 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
new file mode 100644
index 000000000..8ecae4539
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
@@ -0,0 +1,6 @@
+---
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
new file mode 100644
index 000000000..06eb5f936
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -0,0 +1,31 @@
+---
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..ba4d77617
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ # Only check if docker upgrade is required if docker_upgrade is not
+ # already set to False.
+ - include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ # Additional checks for Atomic hosts:
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
new file mode 100644
index 000000000..9a959a959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -0,0 +1,37 @@
+---
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
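These checks reject an inventory that pins a release older than the upgrade target. Example values that would pass for an origin 1.2 to 1.3 upgrade (illustrative; leaving the variables unset is equally valid):

# Either omit these entirely or keep them at/above the upgrade target
openshift_release: v1.3          # the leading 'v' is stripped by the set_fact above
#openshift_image_tag: v1.3.0     # containerized installs only
#openshift_pkg_version: -1.3.0   # RPM installs only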
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
new file mode 100644
index 000000000..354af3cde
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify node processes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ - openshift_docker_facts
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
new file mode 100644
index 000000000..9632626a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -0,0 +1,45 @@
+---
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ vars:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ pre_tasks:
+ - fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: Verify the correct version was found
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+ - fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
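The RPM availability gate above handles two edge cases: the repoquery task may have been skipped entirely (containerized hosts), and it may produce empty output, which `default('0.0', True)` maps to a version that fails loudly rather than passing silently. A small sketch of that conditional, with a hypothetical registered result standing in for the repoquery task:

```
---
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    openshift_release: "3.3"
    openshift_upgrade_target: "3.3"
    # Hypothetical stand-in for the registered repoquery result above
    avail_openshift_version:
      stdout: "3.2.1.15"
      skipped: false
  tasks:
    - fail:
        msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
      when: not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
```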
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 8a2784fb4..2c641e21e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -1,39 +1,93 @@
---
###############################################################################
-# The restart playbook should be run after this playbook completes.
+# Upgrade Masters
###############################################################################
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
tasks:
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - set_fact:
+ etcd_backup_complete: True
- - name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
-###############################################################################
-# Upgrade Masters
-###############################################################################
- name: Upgrade master packages
hosts: oo_masters_to_config
handlers:
@@ -57,7 +111,6 @@
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- include: create_service_signer_cert.yml
- when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
@@ -143,9 +196,9 @@
origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
- # it will be updated when we perform node upgrade.
- docker_protect_installed_version: True
+ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+ # restart.
+ skip_docker_role: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
@@ -177,71 +230,6 @@
- set_fact:
reconcile_complete: True
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-
-# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
-- name: Perform upgrades that may require node evacuation
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
- serial: 1
- any_errors_fatal: true
- roles:
- - openshift_facts
- handlers:
- - include: ../../../../roles/openshift_node/handlers/main.yml
- static: yes
- tasks:
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Mark unschedulable if host is a node
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - include: docker/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- - include: "{{ node_config_hook }}"
- when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
-
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
-
- - include: containerized_node_upgrade.yml
- when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
-
- - meta: flush_handlers
-
- - name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
-
-
##############################################################################
# Gate on reconcile
##############################################################################
@@ -259,3 +247,13 @@
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
+
+- name: Upgrade Docker on dedicated containerized etcd hosts
+ hosts: oo_etcd_to_config:!oo_nodes_to_upgrade
+ serial: 1
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
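The backup section above uses a fan-out-then-gate pattern: every backup host sets `etcd_backup_complete`, then a localhost play collects those facts with the repo's `oo_select_keys`/`oo_collect` filters and aborts if any host is missing. A rough equivalent of the gate using only stock Ansible constructs, for readers without the custom filter plugins (task wording is illustrative):

```
---
- hosts: etcd_hosts_to_backup
  tasks:
    - set_fact:
        etcd_backup_complete: True

- hosts: localhost
  connection: local
  become: no
  tasks:
    - name: Collect hosts that never set the completion fact
      set_fact:
        etcd_backup_failed: "{{ etcd_backup_failed | default([]) + [item] }}"
      when: hostvars[item].etcd_backup_complete is not defined
      with_items: "{{ groups['etcd_hosts_to_backup'] }}"

    - fail:
        msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
      when: etcd_backup_failed | default([]) | length > 0
```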
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
new file mode 100644
index 000000000..9b572dcdf
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -0,0 +1,75 @@
+---
+- name: Evacuate and upgrade nodes
+ hosts: oo_nodes_to_upgrade
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ - docker
+ handlers:
+ - include: ../../../../roles/openshift_node/handlers/main.yml
+ static: yes
+ pre_tasks:
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+    - name: Determine if node is currently schedulable
+ command: >
+ {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ register: node_output
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ changed_when: false
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - set_fact:
+ was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ # NOTE: There is a transient "object has been modified" error here, allow a couple
+ # retries for a more reliable upgrade.
+ register: node_unsched
+ until: node_unsched.rc == 0
+ retries: 3
+ delay: 1
+
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ - include: "{{ node_config_hook }}"
+ when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
+
+ - meta: flush_handlers
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
+ register: node_sched
+ until: node_sched.rc == 0
+ retries: 3
+ delay: 1
+
+
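As the comment on `serial` notes, the node batch size is read once when the play starts, so it is normally supplied as an extra var rather than per-host inventory. A hypothetical way to drive the byo wrapper added in this change (file name and invocation are a sketch, not a verified command line):

```
# Hypothetical extra-vars file (node_upgrade_vars.yml), passed as:
#   ansible-playbook -i hosts \
#     playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml \
#     -e @node_upgrade_vars.yml
---
# Upgrade five nodes at a time; percentages such as "20%" are also accepted.
openshift_upgrade_nodes_serial: 5
```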
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index f4392173a..e3567552e 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,5 +1,16 @@
---
- name: Configure load balancers
hosts: oo_lb_to_config
+ vars:
+    openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443)
+ | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']),
+ openshift_use_nuage | default(false),
+ nuage_mon_rest_server_port | default(none)))
+ + openshift_loadbalancer_additional_frontends | default([]) }}"
+    openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443)
+ | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']),
+ openshift_use_nuage | default(false),
+ nuage_mon_rest_server_port | default(none)))
+ + openshift_loadbalancer_additional_backends | default([]) }}"
roles:
- role: openshift_loadbalancer
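The computed `openshift_loadbalancer_frontends`/`_backends` values above are the generated master-API (and optional Nuage) entries plus whatever the inventory appends through the two `*_additional_*` variables. A hypothetical inventory-side addition, shaped like the entries the role's README documents (names, ports and addresses are made up):

```
openshift_loadbalancer_additional_frontends:
  - name: custom-service
    mode: tcp
    options:
      - tcplog
    binds:
      - "*:9443"
    default_backend: custom-service

openshift_loadbalancer_additional_backends:
  - name: custom-service
    mode: tcp
    option: tcplog
    balance: source
    servers:
      - name: master1
        address: "192.168.122.221:9443"
        opts: check
```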
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index da11ed0af..ed97d539c 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,2 +1 @@
---
-docker_protect_installed_version: False
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a89f5b91a..7147aa2d4 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,7 +11,7 @@
- name: Error out if Docker pre-installed but too old
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
- name: Error out if requested Docker is too old
fail:
@@ -31,19 +31,19 @@
- name: Fail if Docker version requested but downgrade is required
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
# This involves an extremely slow migration process, users should instead run the
# Docker 1.10 upgrade playbook to accomplish this.
- name: Error out if attempting to upgrade Docker across the 1.10 boundary
fail:
msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
- name: Install Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and not docker_protect_installed_version | bool else '' }} state=present"
+ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present"
when: not openshift.common.is_atomic | bool
- name: Start the Docker service
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index d285bdfa3..9b777213e 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -8,3 +8,6 @@ nuage_ca_serial: "{{ nuage_ca_dir }}/nuageMonCA.serial.txt"
nuage_master_mon_dir: /usr/share/nuage-openshift-monitor
nuage_node_plugin_dir: /usr/share/vsp-openshift
+
+nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
+
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index fdead100c..51b89fbf6 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,8 +13,10 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: nuage_ca
-- role: os_firewall
- os_firewall_allow:
- - service: openshift-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+ - role: nuage_ca
+ - role: nuage_common
+ - role: openshift_etcd_client_certificates
+ - role: os_firewall
+ os_firewall_allow:
+ - service: openshift-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index 075de9d9e..b2539517b 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -30,4 +30,11 @@ logLevel: {{ nuage_mon_log_level }}
# Parameters related to the nuage monitor REST server
nuageMonServer:
URL: {{ nuage_mon_rest_server_url }}
- certificateDirectory: {{ cert_output_dir }}
+ certificateDirectory: {{ cert_output_dir }}
+# etcd config required for HA
+etcdClientConfig:
+ ca: {{ openshift_master_config_dir }}/{{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+ certFile: {{ openshift_master_config_dir }}/master.etcd-client.crt
+ keyFile: {{ openshift_master_config_dir }}/master.etcd-client.key
+ urls:
+ - {{ openshift.master.etcd_urls }}
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 92e716a45..b395eba99 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -8,7 +8,6 @@ cert_output_dir: /usr/share/nuage-openshift-monitor
kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig
kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
-nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
nuage_mon_rest_server_url: "0.0.0.0:{{ nuage_mon_rest_server_port }}"
nuage_mon_rest_server_logdir: "{{ nuage_openshift_monitor_log_dir | default('/var/log/nuage-openshift-monitor') }}"
nuage_mon_log_level: "{{ nuage_openshift_monitor_log_level | default('3') }}"
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 223cb768d..c1de367d9 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_docker
+ when: not skip_docker_role | default(False) | bool
- role: openshift_common
- role: openshift_cli_facts
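With `docker_protect_installed_version` gone, plays that must not touch a running docker now opt out of the dependency entirely via `skip_docker_role`, as the control plane upgrade above does. A minimal sketch of a play taking the same opt-out (play name is illustrative):

```
---
- name: Run openshift_cli without pulling in the docker role
  hosts: oo_masters_to_config
  vars:
    skip_docker_role: True
  roles:
    - openshift_cli
```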
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d36926e08..9ffd399bc 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1609,7 +1609,6 @@ class OpenShiftFacts(object):
'docker',
'etcd',
'hosted',
- 'loadbalancer',
'master',
'node']
@@ -1865,13 +1864,6 @@ class OpenShiftFacts(object):
router=dict()
)
- if 'loadbalancer' in roles:
- loadbalancer = dict(frontend_port='8443',
- default_maxconn='20000',
- global_maxconn='20000',
- limit_nofile='100000')
- defaults['loadbalancer'] = loadbalancer
-
return defaults
def guess_host_provider(self):
diff --git a/roles/openshift_loadbalancer/README.md b/roles/openshift_loadbalancer/README.md
index 81fc282be..03e837e46 100644
--- a/roles/openshift_loadbalancer/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -1,27 +1,68 @@
OpenShift HAProxy Loadbalancer
==============================
-TODO
+OpenShift HAProxy Loadbalancer Configuration
Requirements
------------
-TODO
+This role is intended to be applied to the [lb] host group, which is
+separate from OpenShift infrastructure components.
+
+This role is not re-entrant. All haproxy configuration lives in a single file.
Role Variables
--------------
-TODO
+From this role:
+
+| Name                                    | Default value | Description                                                             |
+|-----------------------------------------|---------------|-------------------------------------------------------------------------|
+| openshift_loadbalancer_limit_nofile     | 100000        | Limit on the number of open files.                                      |
+| openshift_loadbalancer_global_maxconn   | 20000         | Maximum per-process number of concurrent connections.                   |
+| openshift_loadbalancer_default_maxconn  | 20000         | Default maximum number of concurrent connections per frontend/backend.  |
+| openshift_loadbalancer_frontends        | none          | List of frontends. See example below.                                   |
+| openshift_loadbalancer_backends         | none          | List of backends. See example below.                                    |
Dependencies
------------
-TODO
+* openshift_facts
+* os_firewall
+* openshift_repos
Example Playbook
----------------
-TODO
+```
+- name: Configure loadbalancer hosts
+ hosts: lb
+ roles:
+ - role: openshift_loadbalancer
+ openshift_loadbalancer_frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:8443"
+ default_backend: atomic-openshift-api
+ openshift_loadbalancer_backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers:
+ - name: master1
+ address: "192.168.122.221:8443"
+ opts: check
+ - name: master2
+ address: "192.168.122.222:8443"
+ opts: check
+ - name: master3
+ address: "192.168.122.223:8443"
+ opts: check
+```
License
-------
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index ed846a1ba..e1d78cfd0 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -10,11 +10,11 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_loadbalancer_facts
+- role: openshift_facts
- role: os_firewall
os_firewall_allow:
- service: haproxy stats
port: "9000/tcp"
- service: haproxy balance
- port: "{{ openshift.loadbalancer.frontend_port }}/tcp"
+ port: "{{ openshift_master_api_port | default(8443) }}/tcp"
- role: openshift_repos
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 03a7c0e4a..bb4982e2d 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -7,15 +7,13 @@
file:
path: /etc/systemd/system/haproxy.service.d
state: directory
- when: "'limit_nofile' in openshift.loadbalancer"
- name: Configure the nofile limits for haproxy
ini_file:
dest: /etc/systemd/system/haproxy.service.d/limits.conf
section: Service
option: LimitNOFILE
- value: "{{ openshift.loadbalancer.limit_nofile }}"
- when: "'limit_nofile' in openshift.loadbalancer"
+ value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
notify: restart haproxy
register: nofile_limit_result
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index b9a279f5f..79e695001 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@
global
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
- maxconn {{ openshift.loadbalancer.global_maxconn }}
+ maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
user haproxy
group haproxy
daemon
@@ -32,14 +32,14 @@ defaults
timeout server 300s
timeout http-keep-alive 10s
timeout check 10s
- maxconn {{ openshift.loadbalancer.default_maxconn }}
+ maxconn {{ openshift_loadbalancer_default_maxconn | default(20000) }}
listen stats :9000
mode http
stats enable
stats uri /
-{% for frontend in openshift.loadbalancer.frontends %}
+{% for frontend in openshift_loadbalancer_frontends %}
frontend {{ frontend.name }}
{% for bind in frontend.binds %}
bind {{ bind }}
@@ -60,7 +60,7 @@ frontend {{ frontend.name }}
{% endif %}
{% endfor %}
-{% for backend in openshift.loadbalancer.backends %}
+{% for backend in openshift_loadbalancer_backends %}
backend {{ backend.name }}
balance {{ backend.balance }}
{% if 'mode' in backend %}
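Because the template now falls back to inline defaults, the connection and file-descriptor limits can be tuned straight from inventory, with no facts role in between. Hypothetical overrides (the values are examples only):

```
openshift_loadbalancer_global_maxconn: 40000
openshift_loadbalancer_default_maxconn: 40000
openshift_loadbalancer_limit_nofile: 200000
```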
diff --git a/roles/openshift_loadbalancer_facts/README.md b/roles/openshift_loadbalancer_facts/README.md
deleted file mode 100644
index 57537cc03..000000000
--- a/roles/openshift_loadbalancer_facts/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift HAProxy Loadbalancer Facts
-====================================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Andrew Butcher (abutcher@redhat.com)
diff --git a/roles/openshift_loadbalancer_facts/meta/main.yml b/roles/openshift_loadbalancer_facts/meta/main.yml
deleted file mode 100644
index 4c5b6552b..000000000
--- a/roles/openshift_loadbalancer_facts/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: Andrew Butcher
- description: OpenShift loadbalancer facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_loadbalancer_facts/tasks/main.yml b/roles/openshift_loadbalancer_facts/tasks/main.yml
deleted file mode 100644
index dc244c0be..000000000
--- a/roles/openshift_loadbalancer_facts/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Set haproxy frontend port
- openshift_facts:
- role: loadbalancer
- local_facts:
- frontend_port: "{{ openshift_master_api_port | default(None) }}"
-
-- name: Set loadbalancer facts
- openshift_facts:
- role: loadbalancer
- local_facts:
- limit_nofile: "{{ openshift_loadbalancer_limit_nofile | default(None) }}"
- default_maxconn: "{{ openshift_loadbalancer_default_maxconn | default(None) }}"
- global_maxconn: "{{ openshift_loadbalancer_global_maxconn | default(None) }}"
- frontends:
- - name: atomic-openshift-api
- mode: tcp
- options:
- - tcplog
- binds:
- - "*:{{ openshift.loadbalancer.frontend_port }}"
- default_backend: atomic-openshift-api
- backends:
- - name: atomic-openshift-api
- mode: tcp
- option: tcplog
- balance: source
- servers: "{{ hostvars
- | oo_select_keys(groups['oo_masters'])
- | oo_haproxy_backend_masters(openshift.loadbalancer.frontend_port) }}"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index ced3eb76f..4d45e8591 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -164,6 +164,9 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
+{% if openshift_master_ingress_ip_network_cidr is defined %}
+ ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}
+{% endif %}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
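The new template block only renders when the variable is present, so exposing an ingress IP range becomes a one-line inventory change. A hypothetical setting (the CIDR is an example value, not a recommendation):

```
openshift_master_ingress_ip_network_cidr: "172.46.0.0/16"
```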
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 70974da17..37c80c29e 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -15,4 +15,4 @@ dependencies:
- role: openshift_repos
- role: openshift_docker_facts
- role: docker
- when: openshift.common.is_containerized | default(False) | bool
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 43b1b3244..347ae7ec9 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -751,7 +751,9 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
hosts_to_run_on.remove(host)
# Handle the cases where we know about uninstalled systems
- if len(uninstalled_hosts) > 0:
+ # TODO: This logic is getting hard to understand.
+ # we should revise all this to be cleaner.
+ if not force and len(uninstalled_hosts) > 0:
for uninstalled_host in uninstalled_hosts:
click.echo("{} is currently uninstalled".format(uninstalled_host))
# Fall through