-rwxr-xr-x  .papr.sh  2
-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  DEPLOYMENT_TYPES.md  2
-rw-r--r--  ansible.cfg  4
-rw-r--r--  docs/proposals/role_decomposition.md  2
-rw-r--r--  filter_plugins/oo_filters.py  29
-rw-r--r--  images/installer/root/exports/manifest.json  2
-rwxr-xr-x  images/installer/root/usr/local/bin/run  2
-rw-r--r--  inventory/.gitignore (renamed from inventory/byo/.gitignore)  0
-rw-r--r--  inventory/README.md  6
-rw-r--r--  inventory/hosts.example (renamed from inventory/byo/hosts.example)  4
-rw-r--r--  inventory/hosts.glusterfs.external.example (renamed from inventory/byo/hosts.byo.glusterfs.external.example)  10
-rw-r--r--  inventory/hosts.glusterfs.mixed.example (renamed from inventory/byo/hosts.byo.glusterfs.mixed.example)  10
-rw-r--r--  inventory/hosts.glusterfs.native.example (renamed from inventory/byo/hosts.byo.glusterfs.native.example)  10
-rw-r--r--  inventory/hosts.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.glusterfs.registry-only.example)  8
-rw-r--r--  inventory/hosts.glusterfs.storage-and-registry.example (renamed from inventory/byo/hosts.byo.glusterfs.storage-and-registry.example)  8
-rw-r--r--  inventory/hosts.openstack (renamed from inventory/byo/hosts.openstack)  2
-rw-r--r--  openshift-ansible.spec  232
-rw-r--r--  playbooks/adhoc/uninstall.yml  2
-rw-r--r--  playbooks/aws/README.md  6
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml  6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml  12
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml  18
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml  6
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml  10
-rw-r--r--  playbooks/byo/config.yml  3
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift_facts.yml  5
-rw-r--r--  playbooks/byo/rhel_subscribe.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml  12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml  3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml  34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml  30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml  24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml  46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml  42
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml  24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml  46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml  42
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml  24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml  38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml  24
-rw-r--r--  playbooks/gcp/provision.yml  2
-rw-r--r--  playbooks/openshift-etcd/private/config.yml  1
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml  2
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml  4
-rw-r--r--  playbooks/openshift-glusterfs/README.md  3
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml  4
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml  4
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml  7
-rw-r--r--  playbooks/openshift-logging/config.yml  6
-rw-r--r--  playbooks/openshift-master/private/config.yml  5
-rw-r--r--  playbooks/openshift-master/private/redeploy-certificates.yml  4
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml  6
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml  4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml  4
-rw-r--r--  playbooks/openshift-master/redeploy-certificates.yml  6
-rw-r--r--  playbooks/openshift-master/redeploy-openshift-ca.yml  4
-rw-r--r--  playbooks/openshift-nfs/private/config.yml  1
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml  1
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml  1
-rw-r--r--  playbooks/openshift-node/private/enable_excluders.yml  1
-rw-r--r--  playbooks/openshift-node/private/redeploy-certificates.yml  4
-rw-r--r--  playbooks/openshift-node/private/restart.yml  8
-rw-r--r--  playbooks/openshift-node/private/setup.yml  1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml  6
-rw-r--r--  playbooks/openstack/README.md  2
-rw-r--r--  playbooks/openstack/advanced-configuration.md  4
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml  2
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml  4
-rw-r--r--  playbooks/openstack/openshift-cluster/provision_install.yml  6
-rw-r--r--  playbooks/prerequisites.yml  6
-rw-r--r--  playbooks/redeploy-certificates.yml  22
-rw-r--r--  roles/container_runtime/defaults/main.yml  4
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml  2
-rw-r--r--  roles/container_runtime/templates/daemon.json  4
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml  2
-rw-r--r--  roles/flannel/handlers/main.yml  2
-rw-r--r--  roles/kuryr/tasks/node.yaml  4
-rw-r--r--  roles/nuage_master/handlers/main.yaml  4
-rw-r--r--  roles/nuage_node/handlers/main.yaml  2
-rw-r--r--  roles/nuage_node/vars/main.yaml  2
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml  5
-rw-r--r--  roles/openshift_ca/tasks/main.yml  2
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py  7
-rw-r--r--  roles/openshift_cli/tasks/main.yml  2
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2  2
-rw-r--r--  roles/openshift_excluder/README.md  5
-rw-r--r--  roles/openshift_excluder/defaults/main.yml  2
-rw-r--r--  roles/openshift_excluder/meta/main.yml  1
-rw-r--r--  roles/openshift_excluder/tasks/main.yml  5
-rw-r--r--  roles/openshift_facts/defaults/main.yml  6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  179
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md  2
-rw-r--r--  roles/openshift_health_checker/defaults/main.yml  6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py  4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py  4
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py  2
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py  5
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py  9
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py  8
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py  7
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml  2
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2  12
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2  10
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2  12
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2  10
-rw-r--r--  roles/openshift_logging/handlers/main.yml  4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2  1
-rw-r--r--  roles/openshift_management/README.md  10
-rw-r--r--  roles/openshift_management/defaults/main.yml  2
-rw-r--r--  roles/openshift_master/handlers/main.yml  4
-rw-r--r--  roles/openshift_master/tasks/main.yml  16
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml  2
-rw-r--r--  roles/openshift_master/tasks/restart.yml  4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml  6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml  28
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml  12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2  16
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2  16
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2  6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2  8
-rw-r--r--  roles/openshift_metrics/handlers/main.yml  4
-rw-r--r--  roles/openshift_node/README.md  6
-rw-r--r--  roles/openshift_node/defaults/main.yml  6
-rw-r--r--  roles/openshift_node/handlers/main.yml  2
-rw-r--r--  roles/openshift_node/tasks/aws.yml  2
-rw-r--r--  roles/openshift_node/tasks/config.yml  8
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml  2
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml  2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml  2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml  2
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml  2
-rw-r--r--  roles/openshift_node/tasks/install.yml  4
-rw-r--r--  roles/openshift_node/tasks/main.yml  9
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml  4
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml  2
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml  14
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml  8
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml  6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service  8
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service  22
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml  2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml  2
-rw-r--r--  roles/openshift_version/defaults/main.yml  8
-rw-r--r--  roles/openshift_version/meta/main.yml  1
-rw-r--r--  roles/openshift_version/tasks/main.yml  8
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml  6
-rw-r--r--  roles/rhel_subscribe/README.md  29
-rw-r--r--  roles/rhel_subscribe/defaults/main.yml  2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml  21
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml  29
-rw-r--r--  setup.py  56
-rw-r--r-- [-rwxr-xr-x]  test/integration/build-images.sh  0
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml  6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml  4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml  6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml  6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml  4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml  6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml  6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml  8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml  8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml  4
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml  7
-rw-r--r-- [-rwxr-xr-x]  test/integration/run-tests.sh  0
-rw-r--r--  tox.ini  3
192 files changed, 968 insertions, 900 deletions
diff --git a/.papr.sh b/.papr.sh
index 80453d4b2..c7b06f059 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -35,7 +35,7 @@ trap upload_journals ERR
# run the actual installer
# FIXME: override openshift_image_tag defined in the inventory until
# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
### DISABLING TESTS FOR NOW, SEE:
### https://github.com/openshift/openshift-ansible/pull/6132
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9db0b5c98..70f88dcd6 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.13.0 ./
+3.9.0-0.3.0 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index e52e47202..3788e9bfb 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -10,7 +10,7 @@ The table below outlines the defaults per `openshift_deployment_type`:
| openshift_deployment_type | origin | openshift-enterprise |
|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
+| **openshift_service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
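Note: `openshift_service_type` replaces the legacy `openshift.common.service_type` fact (see the "Remove openshift.common.service_type" changelog entry below). As the table says, the value doubles as the package-name prefix; a minimal, hypothetical task sketch of how such a variable is typically consumed — the task itself is illustrative and not part of this patch:

```yaml
# Hypothetical sketch: package names built from openshift_service_type,
# yielding origin-node on origin and atomic-openshift-node on enterprise.
- name: Install the node package (illustrative only)
  package:
    name: "{{ openshift_service_type }}-node"
    state: present
```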
diff --git a/ansible.cfg b/ansible.cfg
index 9900d28f8..e4d72553e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
# work around privilege escalation timeouts in ansible:
timeout = 30
-# Uncomment to use the provided BYO inventory
-#inventory = inventory/byo/hosts.example
+# Uncomment to use the provided example inventory
+#inventory = inventory/hosts.example
[inventory]
# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 6434e24e7..37d080d5c 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -262,7 +262,7 @@ dependencies:
- name: "Create logging project"
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
when: not ansible_check_mode and "not found" in logging_project_result.stderr
- name: Create logging cert directory
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 45795d097..3eaf2aed5 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -375,6 +375,13 @@ def oo_split(string, separator=','):
return string.split(separator)
+def oo_list_to_dict(lst, separator='='):
+ """ This converts a list of ["k=v"] to a dictionary {k: v}.
+ """
+ kvs = [i.split(separator) for i in lst]
+ return {k: v for k, v in kvs}
+
+
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
@@ -690,26 +697,6 @@ def to_padded_yaml(data, level=0, indent=2, **kw):
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_" and translate
- legacy facts to their openshift_env counterparts.
-
- Ex: hostvars = {'openshift_fact': 42,
- 'theyre_taking_the_hobbits_to': 'isengard'}
- returns = {'openshift_fact': 42}
- '''
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- facts = {}
- regex = re.compile('^openshift_.*')
- for key in hostvars:
- if regex.match(key):
- facts[key] = hostvars[key]
-
- return facts
-
-
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
@@ -989,6 +976,7 @@ class FilterModule(object):
"oo_combine_dict": oo_combine_dict,
"oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
+ "oo_list_to_dict": oo_list_to_dict,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": oo_parse_named_certificates,
@@ -996,7 +984,6 @@ class FilterModule(object):
"oo_pretty_print_cluster": oo_pretty_print_cluster,
"oo_generate_secret": oo_generate_secret,
"oo_nodes_with_label": oo_nodes_with_label,
- "oo_openshift_env": oo_openshift_env,
"oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
"oo_pods_match_component": oo_pods_match_component,
"oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 8b984d7a3..53696b03e 100644
--- a/images/installer/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -4,7 +4,7 @@
"OPTS": "",
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
- "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
"HOME_ROOT": "/root",
"ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index cd38a6ff0..67cf7dfde 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -18,7 +18,7 @@ INVENTORY="$(mktemp)"
if [[ -v INVENTORY_FILE ]]; then
# Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
# does not attempt to modify the original
- cp -a ${INVENTORY_FILE} ${INVENTORY}
+ cp ${INVENTORY_FILE} ${INVENTORY}
elif [[ -v INVENTORY_DIR ]]; then
INVENTORY="$(mktemp -d)"
cp -R ${INVENTORY_DIR}/* ${INVENTORY}
diff --git a/inventory/byo/.gitignore b/inventory/.gitignore
index 6ff331c7e..6ff331c7e 100644
--- a/inventory/byo/.gitignore
+++ b/inventory/.gitignore
diff --git a/inventory/README.md b/inventory/README.md
index 5e26e3c32..2e348194f 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -1,5 +1 @@
-# OpenShift Ansible inventory config files
-
-You can install OpenShift on:
-
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
+# OpenShift Ansible example inventory config files
diff --git a/inventory/byo/hosts.example b/inventory/hosts.example
index e3b56d7a1..c18a53671 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/hosts.example
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
@@ -1047,7 +1047,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
#openshift_management_username: admin
#openshift_management_password: smartvm
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index acf68266e..bf2557cf0 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -44,7 +44,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index a559dc377..8a20a037e 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -47,7 +47,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index ca4765c53..59acf1194 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -1,16 +1,16 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for applications. It
-# will also autmatically create a StorageClass for this purpose.
+# will also automatically create a StorageClass for this purpose.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster.
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 32040f593..6f33e9f6d 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for exclusive use
# as storage for a natively hosted Docker registry.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# This inventory may also be used with openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 9bd37cbf6..1f3a4282a 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for both general
# application use and a natively hosted Docker registry. It will also create a
# StorageClass for the general storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.openstack b/inventory/hosts.openstack
index c648078c4..d928c2b86 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7d543afdd..54fe962fb 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.0.0%{?dist}
+Release: 0.3.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/byo/* docs/example-inventories/
+cp inventory/* docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -285,14 +285,140 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.3.0
+- remove integration tests from tox (lmeyer@redhat.com)
+- correct ansible-playbook command syntax (jdiaz@redhat.com)
+- Add openshift_facts to upgrade plays for service_type (mgugino@redhat.com)
+- Check for openshift attribute before using it during CNS install.
+ (jmencak@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.2.0
+- GlusterFS: Add playbook doc note (jarrpa@redhat.com)
+- Fix openshift hosted registry rollout (rteague@redhat.com)
+- Remove container_runtime from the openshift_version (sdodson@redhat.com)
+
+* Fri Dec 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.1.0
+- Cleanup byo references (rteague@redhat.com)
+- openshift_node: reintroduce restart of CRI-O. (gscrivan@redhat.com)
+- container-engine: skip openshift_docker_log_driver when it is False
+ (gscrivan@redhat.com)
+- container-engine: log-opts is a dictionary in the daemon.json file
+ (gscrivan@redhat.com)
+- openshift_version: add dependency to openshift_facts (gscrivan@redhat.com)
+- openshift_version: define openshift_use_crio_only (gscrivan@redhat.com)
+- openshift_version: add dependency to container_runtime (gscrivan@redhat.com)
+- crio: define and use l_is_node_system_container (gscrivan@redhat.com)
+- Update deprecation checks - include: (rteague@redhat.com)
+- Add os_firewall to prerequisites.yml (mgugino@redhat.com)
+- add 3.8 templates for gluster ep and svc (lmeyer@redhat.com)
+- Remove openshift.common.service_type (mgugino@redhat.com)
+- Remove unused openshift_env_structures and openshift_env (mgugino@redhat.com)
+- Fix incorrect register name master registry auth (mgugino@redhat.com)
+- Include Deprecation: Convert to import_playbook (rteague@redhat.com)
+- add 3.8 templates for gluster ep and svc (m.judeikis@gmail.com)
+- Remove all uses of openshift.common.admin_binary (sdodson@redhat.com)
+- Implement container_runtime playbooks and changes (mgugino@redhat.com)
+- Playbook Consolidation - byo/config.yml (rteague@redhat.com)
+- openshift_logging_kibana: fix mixing paren (lmeyer@redhat.com)
+- Fix ami building. (kwoodson@redhat.com)
+- Include Deprecation: Convert to include_tasks (rteague@redhat.com)
+- Add missing symlinks in openshift-logging (rteague@redhat.com)
+- Fix generate_pv_pvcs_list plugin undef (mgugino@redhat.com)
+- Playbook Consolidation - etcd Upgrade (rteague@redhat.com)
+- bug 1519622. Disable rollback of ES DCs (jcantril@redhat.com)
+- Remove all references to pacemaker (pcs, pcsd) and
+ openshift.master.cluster_method. (abutcher@redhat.com)
+- Remove entry point files no longer needed by CI (rteague@redhat.com)
+- Don't check for the deployment_type (tomas@sedovic.cz)
+- Get the correct value out of openshift_release (tomas@sedovic.cz)
+- Fix oreg_auth_credentials_create register var (mgugino@redhat.com)
+- Fix and cleanup not required dns bits (bdobreli@redhat.com)
+- Fix hosted vars (mgugino@redhat.com)
+- Remove duplicate init import in network_manager.yml (rteague@redhat.com)
+- Document testing repos for dev purposes (bdobreli@redhat.com)
+- Remove unused protected_facts_to_overwrite (mgugino@redhat.com)
+- Use openshift testing repos for openstack (bdobreli@redhat.com)
+- Use openshift_release instead of ose_version (tomas@sedovic.cz)
+- Remove the ose_version check (tomas@sedovic.cz)
+- Allow number of retries in openshift_management to be configurable
+ (ealfassa@redhat.com)
+- Bumping to 3.9 (smunilla@redhat.com)
+- Cleanup unused openstack provider code (bdobreli@redhat.com)
+- Adding 3.9 tito releaser (smunilla@redhat.com)
+- Implement container runtime role (mgugino@redhat.com)
+- Fix glusterfs checkpoint info (rteague@redhat.com)
+- storage_glusterfs: fix typo (lmeyer@redhat.com)
+- Playbook Consolidation - Redeploy Certificates (rteague@redhat.com)
+- Fix tox (tomas@sedovic.cz)
+- Remove shell environment lookup (tomas@sedovic.cz)
+- Revert "Fix syntax error caused by an extra paren" (tomas@sedovic.cz)
+- Revert "Fix the env lookup fallback in rhel_subscribe" (tomas@sedovic.cz)
+- Remove reading shell environment in rhel_subscribe (tomas@sedovic.cz)
+- retry package operations (lmeyer@redhat.com)
+- Add v3.9 support (sdodson@redhat.com)
+- Playbook Consolidation - openshift-logging (rteague@redhat.com)
+- Do not escalate privileges in jks generation tasks (iacopo.rozzo@amadeus.com)
+- Fix inventory symlinks in origin-ansible container. (dgoodwin@redhat.com)
+- Initial upgrade for scale groups. (kwoodson@redhat.com)
+- Update the doc text (tomas@sedovic.cz)
+- Optionally subscribe OpenStack RHEL nodes (tomas@sedovic.cz)
+- Fix the env lookup fallback in rhel_subscribe (tomas@sedovic.cz)
+- Fix syntax error caused by an extra paren (tomas@sedovic.cz)
+- Fix no_log warnings for custom module (mgugino@redhat.com)
+- Add external_svc_subnet for k8s loadbalancer type service
+ (jihoon.o@samsung.com)
+- Remove openshift_facts project_cfg_facts (mgugino@redhat.com)
+- Remove dns_port fact (mgugino@redhat.com)
+- Bug 1512793- Fix idempotence issues in ASB deploy (fabian@fabianism.us)
+- Remove unused task file from etcd role (rteague@redhat.com)
+- fix type in authroize (jchaloup@redhat.com)
+- Use IP addresses for OpenStack nodes (tomas@sedovic.cz)
+- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
+- remove schedulable from openshift_facts (mgugino@redhat.com)
+- inventory: Add example for service catalog vars (smilner@redhat.com)
+- Correct usage of include_role (rteague@redhat.com)
+- Remove openshift.common.cli_image (mgugino@redhat.com)
+- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
+- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
+- GlusterFS: Remove extraneous line from glusterblock template
+ (jarrpa@redhat.com)
+- Remove openshift_clock from meta depends (mgugino@redhat.com)
+- Simplify is_master_system_container logic (mgugino@redhat.com)
+- dist.iteritems() no longer exists in Python 3. (jpazdziora@redhat.com)
+- Remove spurrious file committed by error (diego.abelenda@camptocamp.com)
+- Fix name of the service pointed to by hostname
+ (diego.abelenda@camptocamp.com)
+- Missed the default value after the variable name change...
+ (diego.abelenda@camptocamp.com)
+- Change the name of the variable and explicitely document the names
+ (diego.abelenda@camptocamp.com)
+- Allow to set the hostname for routes to prometheus and alertmanager
+ (diego.abelenda@camptocamp.com)
+- Allow openshift_install_examples to be false (michael.fraenkel@gmail.com)
+- Include Deprecation - openshift-service-catalog (rteague@redhat.com)
+- Remove is_openvswitch_system_container from facts (mgugino@redhat.com)
+- Workaround the fact that package state=present with dnf fails for already
+ installed but excluded packages. (jpazdziora@redhat.com)
+- With dnf repoquery and excluded packages, --disableexcludes=all is needed to
+ list the package with --installed. (jpazdziora@redhat.com)
+- Add support for external glusterfs as registry backend (m.judeikis@gmail.com)
+- cri-o: honor additional and insecure registries again (gscrivan@redhat.com)
+- docker: copy Docker metadata to the alternative storage path
+ (gscrivan@redhat.com)
+- Add check for gluterFS DS to stop restarts (m.judeikis@gmail.com)
+- Bug 1514417 - Adding correct advertise-client-urls (shawn.hurley21@gmail.com)
+- Uninstall tuned-profiles-atomic-openshift-node as defined in origin.spec
+ (jmencak@redhat.com)
+- Mod startup script to publish all frontend binds (cwilkers@redhat.com)
+
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.13.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.12.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.11.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.10.0
- tox.ini: simplify unit test reqs (lmeyer@redhat.com)
@@ -341,16 +467,16 @@ Atomic OpenShift Utilities includes
- Include Deprecation - Init Playbook Paths (rteague@redhat.com)
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
- bug 1498398. Enclose content between store tag (rromerom@redhat.com)
@@ -643,10 +769,10 @@ Atomic OpenShift Utilities includes
- Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
* Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.4-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.3-1
- Adding configuration for keeping transient namespace on error.
@@ -816,10 +942,10 @@ Atomic OpenShift Utilities includes
- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
--
+-
* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
--
+-
* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
- bug 1506073. Lower cpu request for logging when it exceeds limit
@@ -849,7 +975,7 @@ Atomic OpenShift Utilities includes
- Refactor health check playbooks (rteague@redhat.com)
* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
--
+-
* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
- Fixing documentation for the cert_key_path variable name.
@@ -923,16 +1049,16 @@ Atomic OpenShift Utilities includes
(hansmi@vshn.ch)
* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
--
+-
* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
- Use "requests" for CPU resources instead of limits
@@ -956,16 +1082,16 @@ Atomic OpenShift Utilities includes
(dymurray@redhat.com)
* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
- Change to service-signer.crt for template_service_broker CA_BUNDLE
@@ -988,7 +1114,7 @@ Atomic OpenShift Utilities includes
- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
--
+-
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
- Fix pvc selector default to be empty dict instead of string
@@ -1030,16 +1156,16 @@ Atomic OpenShift Utilities includes
(jchaloup@redhat.com)
* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
--
+-
* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
- updated dynamic provision section for openshift metrics to support storage
@@ -1448,7 +1574,7 @@ Atomic OpenShift Utilities includes
- oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
--
+-
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
- Fix ansible_syntax check (rteague@redhat.com)
@@ -1475,7 +1601,7 @@ Atomic OpenShift Utilities includes
(miciah.masters@gmail.com)
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
--
+-
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
- Update openshift_hosted_routers example to be in ini format.
@@ -1537,10 +1663,10 @@ Atomic OpenShift Utilities includes
- Add missing hostnames to registry cert (sdodson@redhat.com)
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
- openshift_version: enterprise accepts new style pre-release
@@ -1558,13 +1684,13 @@ Atomic OpenShift Utilities includes
- Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
- Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
@@ -1601,13 +1727,13 @@ Atomic OpenShift Utilities includes
(sdodson@redhat.com)
* Sat Aug 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.103.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.102.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.101.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.100.0
- Change memory requests and limits units (mak@redhat.com)
@@ -1906,13 +2032,13 @@ Atomic OpenShift Utilities includes
(kwoodson@redhat.com)
* Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
--
+-
* Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
- Config was missed before replace. (jkaur@redhat.com)
@@ -1935,7 +2061,7 @@ Atomic OpenShift Utilities includes
- GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
--
+-
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
- add scheduled pods check (jvallejo@redhat.com)
@@ -1960,7 +2086,7 @@ Atomic OpenShift Utilities includes
- updating fetch tasks to be flat paths (ewolinet@redhat.com)
* Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
--
+-
* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
- increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
@@ -2008,7 +2134,7 @@ Atomic OpenShift Utilities includes
- Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
--
+-
* Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
- etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
@@ -2020,7 +2146,7 @@ Atomic OpenShift Utilities includes
- Fixes to storage migration (sdodson@redhat.com)
* Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
--
+-
* Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
- Fix upgrade (sdodson@redhat.com)
@@ -2161,7 +2287,7 @@ Atomic OpenShift Utilities includes
- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
--
+-
* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
- Updating default from null to "" (ewolinet@redhat.com)
@@ -2205,7 +2331,7 @@ Atomic OpenShift Utilities includes
- CloudForms 4.5 templates (simaishi@redhat.com)
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
--
+-
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
- Make rollout status check best-effort, add poll (skuznets@redhat.com)
@@ -2267,7 +2393,7 @@ Atomic OpenShift Utilities includes
- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
--
+-
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
@@ -2323,7 +2449,7 @@ Atomic OpenShift Utilities includes
- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
--
+-
* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
@@ -2355,7 +2481,7 @@ Atomic OpenShift Utilities includes
loopback kubeconfigs. (abutcher@redhat.com)
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
--
+-
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
- Updating image for registry_console (ewolinet@redhat.com)
@@ -2602,13 +2728,13 @@ Atomic OpenShift Utilities includes
- Fix additional master cert & client config creation. (abutcher@redhat.com)
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
--
+-
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
- Updating logging and metrics to restart api, ha and controllers when updating
@@ -2621,10 +2747,10 @@ Atomic OpenShift Utilities includes
- Moving Dockerfile content to images dir (jupierce@redhat.com)
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
--
+-
* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
--
+-
* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
- Fix 1448368, and some other minors issues (ghuang@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ed7a7bd1a..9f044c089 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -123,7 +123,7 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-openshift-node
+ - tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
register: result
until: result | success
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 417fb539a..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster
using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
#### Step 0 (optional)
@@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
Now it is time to install Openshift using the openshift-ansible installer. This can be achieved by running the following playbook:
```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```
This playbook accomplishes the following:
1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml)
Once this playbook completes, the cluster masters should be installed and configured.
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5815c4975..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -33,8 +33,8 @@
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index 1dabae357..9d9ed29de 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -1,19 +1,19 @@
---
-- include: ../../openshift-hosted/private/config.yml
+- import_playbook: ../../openshift-hosted/private/config.yml
-- include: ../../openshift-metrics/private/config.yml
+- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
-- include: ../../openshift-logging/private/config.yml
+- import_playbook: ../../openshift-logging/private/config.yml
when: openshift_logging_install_logging | default(false) | bool
-- include: ../../openshift-prometheus/private/config.yml
+- import_playbook: ../../openshift-prometheus/private/config.yml
when: openshift_hosted_prometheus_deploy | default(false) | bool
-- include: ../../openshift-service-catalog/private/config.yml
+- import_playbook: ../../openshift-service-catalog/private/config.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: ../../openshift-management/private/config.yml
+- import_playbook: ../../openshift-management/private/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index f8206529a..b03fb0b7f 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -16,31 +16,31 @@
tasks_from: master_facts.yml
- name: run the init
- include: ../../init/main.yml
+ import_playbook: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../openshift-checks/private/install.yml
+ import_playbook: ../../openshift-checks/private/install.yml
- name: etcd install
- include: ../../openshift-etcd/private/config.yml
+ import_playbook: ../../openshift-etcd/private/config.yml
- name: include nfs
- include: ../../openshift-nfs/private/config.yml
+ import_playbook: ../../openshift-nfs/private/config.yml
when: groups.oo_nfs_to_config | default([]) | count > 0
- name: include loadbalancer
- include: ../../openshift-loadbalancer/private/config.yml
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
- name: include openshift-master config
- include: ../../openshift-master/private/config.yml
+ import_playbook: ../../openshift-master/private/config.yml
- name: include master additional config
- include: ../../openshift-master/private/additional_config.yml
+ import_playbook: ../../openshift-master/private/additional_config.yml
- name: include master additional config
- include: ../../openshift-node/private/config.yml
+ import_playbook: ../../openshift-node/private/config.yml
- name: include openshift-glusterfs
- include: ../../openshift-glusterfs/private/config.yml
+ import_playbook: ../../openshift-glusterfs/private/config.yml
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index f5eb01b14..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,6 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index 78dd6a49b..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,16 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster on masters
- include: install.yml
+ import_playbook: install.yml
- name: provision the infra/compute playbook to install node resources
- include: provision_nodes.yml
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
- name: Include the hosted.yml playbook to finish the hosted configuration
- include: hosted.yml
+ import_playbook: hosted.yml
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 4b74e5bce..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index c46b22331..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index c880fe7f7..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index aeec5f5cc..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 4664a9a2b..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index cbb89bc4d..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1adfbdec0..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b4da18281..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
index 14b0f85d4..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -4,4 +4,4 @@
#
# Upgrades scale group nodes only.
#
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index f7e5dd1d2..c4094aa7e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index cc04d81c1..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 37a9f69bb..74981cc31 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
index e8f9d94e2..a2a9d59f2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index acb4195e3..869e185af 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index df19097e1..a5867434b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 29e0ebe8d..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,13 +1,12 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
hosts: oo_all_hosts
roles:
- openshift_facts
tasks:
- - openshift_facts:
- openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
+ - openshift_facts: {}
register: result
- debug:
var: result
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 261143080..5a877809a 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/evaluate_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
index 800621857..33ed6a283 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index a66301c0d..ab3171c9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
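
Both excluder plays drop r_openshift_excluder_service_type: call sites no longer pass the service type down, leaving the openshift_excluder role to derive it itself, presumably from the flat openshift_service_type variable this commit introduces elsewhere. A sketch of the trimmed call site (host pattern illustrative; role parameters as in the diff):

    ---
    - name: Disable excluders
      hosts: oo_nodes_to_upgrade  # host pattern illustrative
      gather_facts: no
      roles:
      - role: openshift_excluder
        r_openshift_excluder_action: disable
        r_openshift_excluder_verify_upgrade: true
        r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
        r_openshift_excluder_package_state: latest
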
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..fcb828808 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,7 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_tasks: upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -32,6 +32,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
@@ -51,7 +52,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +60,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
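
Two changes in docker_upgrade.yml are easy to miss: task-file includes become include_tasks, and the node drain switches from the retired admin_binary fact (historically the oadm wrapper) to client_binary plus the adm subcommand. The drain task, assembled from the diff:

    - name: Drain Node for Kubelet upgrade
      command: >
        {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }}
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        --force --delete-local-data --ignore-daemonsets
      delegate_to: "{{ groups.oo_first_master.0 }}"
      register: l_docker_upgrade_drain_result
      until: not l_docker_upgrade_drain_result | failed
      retries: 60
      delay: 60
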
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 3b779becb..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -11,9 +11,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
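
The service lists now read the flat openshift_service_type inventory variable instead of the openshift.common.service_type fact, so the unit names expand to origin-node, atomic-openshift-master-api, and so on (origin and atomic-openshift being the conventional values for this variable, though the diff does not spell them out). The variable appears to be supplied by the openshift_facts role's defaults, which would explain why the controller-cycling plays further down newly list that role even with gather_facts disabled. An illustrative task shape:

    - service: name={{ item }} state=started  # illustrative; restart.yml drives the real states
      with_items:
      - "{{ openshift_service_type }}-node"  # e.g. origin-node or atomic-openshift-node
      failed_when: false
      when: openshift.common.is_containerized | bool
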
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 83be290e6..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -44,5 +44,5 @@
register: result
until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..5454a6680 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 6d8503879..18a08eb99 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,7 +1,7 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../../docker/upgrade_check.yml
+- include_tasks: ../../docker/upgrade_check.yml
when:
- docker_upgrade is not defined or (docker_upgrade | bool)
- not (openshift.common.is_atomic | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 6a5bc24f7..bef95546d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -13,21 +13,21 @@
block:
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift_service_type }}-master"
# Handle the non-HA to HA upgrade case.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
register: master_api_service_status
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
when:
- master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
- name: Ensure Master is running
service:
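
The probe logic in verify_control_plane_running.yml is unchanged, only re-keyed to openshift_service_type: a single-master install runs one combined <service_type>-master unit, an HA control plane runs split -master-api and -master-controllers units, and the systemctl list-units probe decides which set the play should assert is running. Condensed from the diff:

    - set_fact:
        master_services:
        - "{{ openshift_service_type }}-master"
    - command: >
        systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
      register: master_api_service_status
    - set_fact:
        master_services:
        - "{{ openshift_service_type }}-master-api"
        - "{{ openshift_service_type }}-master-controllers"
      when:
      - master_api_service_status.stdout_lines | length > 0
      - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
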
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 84b740227..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0d3fed212..37fc8a0f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -17,7 +17,7 @@
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- name: Backup and upgrade etcd
- include: ../../../openshift-etcd/private/upgrade_main.yml
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +30,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +71,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +82,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -275,7 +275,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -305,7 +305,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
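
upgrade_control_plane.yml shows why both conversions exist side by side: fixed child playbooks (the etcd backup, the service signer cert) become static import_playbook entries, while the user-supplied master upgrade hooks must stay dynamic. include_tasks is evaluated at run time, so a hook path held in a variable such as openshift_master_upgrade_pre_hook resolves correctly; a static import would need the path at parse time. A hook is just an ordinary task file named in the inventory; a minimal sketch (hook body illustrative):

    # inventory (illustrative):
    #   openshift_master_upgrade_pre_hook=/usr/share/ansible/hooks/pre_master.yml

    ---
    # pre_master.yml, run once per master during the rolling upgrade
    - name: Announce the master about to be upgraded
      debug:
        msg: "Pre-upgrade hook running on {{ inventory_hostname }}"
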
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..f7a85545b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
@@ -45,7 +45,6 @@
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index d9ce3a7e3..47410dff3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -13,7 +13,7 @@
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
- name: initialize upgrade bits
- include: init.yml
+ import_playbook: init.yml
- name: Drain and upgrade nodes
hosts: oo_sg_current_nodes
@@ -42,7 +42,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5f9c56867..9f9399ff9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,7 +17,7 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -43,27 +43,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -73,29 +73,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -107,12 +107,12 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1aac3d014..7374160d6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,7 +25,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -51,23 +51,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,10 +111,10 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 306b76422..de9bf098e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 6d4949542..9ec788e76 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +121,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
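
The controller-cycling play at the end of the v3_7 upgrade gains an explicit openshift_facts role because it runs with gather_facts disabled; pulling in the role is what makes openshift_service_type resolvable there (an assumption consistent with the variable moving out of the facts namespace). The play's shape also matters: with Ansible's default linear strategy the stop task finishes on every master before any start task runs, so all controllers come back together and elect a leader under the new election mode, which a per-host rolling restart would not guarantee.

    - name: Cycle all controller services to force new leader election mode
      hosts: oo_masters_to_config
      gather_facts: no
      roles:
      - role: openshift_facts
      tasks:
      - systemd:
          name: "{{ openshift_service_type }}-master-controllers"
          state: stopped
      - systemd:
          name: "{{ openshift_service_type }}-master-controllers"
          state: started
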
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 0a592896b..ad67b6c44 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,29 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -115,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +125,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b381d606a..27a7f67ea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index e7d7756d1..60ec79df5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +121,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index be362e3ff..c1a3f64f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,29 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -115,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +125,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 6e68116b0..dd716b241 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 94c16cae0..1e704b66c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -83,29 +83,29 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +117,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -127,6 +127,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -137,6 +139,6 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
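
One v3_9 oddity: the docker-target verification here becomes import_tasks while every other release tree uses include_tasks. Both work for a fixed path; the practical difference is that import_tasks is inlined at parse time, so the pre_upgrade tag on it propagates to each imported task and --tags pre_upgrade selects them, whereas a tag on a dynamic include_tasks applies only to the include statement itself. A condensed sketch of the imported form:

    - name: Verify docker upgrade targets
      hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
      tasks:
      - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
        tags:
        - pre_upgrade

Note also that the v3_9 controller-cycling hunks above add the openshift_facts role but leave openshift.common.service_type unconverted in their context lines; this commit does not touch those references.
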
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 2045f6379..a9689da1f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -87,29 +87,29 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +121,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -131,6 +131,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -141,4 +143,4 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
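Both v3_9 upgrade playbooks above add an explicit openshift_facts role to their controller-cycling play, presumably because the play runs with gather_facts: no yet references openshift.common.service_type. A sketch of the resulting play shape (the patch itself stops and then starts the unit; restarted here is a simplification):

- name: Cycle controller services (sketch)
  hosts: oo_masters_to_config
  gather_facts: no
  roles:
  - role: openshift_facts    # sets openshift.common.* despite gather_facts: no
  tasks:
  - systemd:
      name: "{{ openshift.common.service_type }}-master-controllers"
      state: restarted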
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 6134f8653..d95cfa4e1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -80,25 +80,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +110,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
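One subtlety worth noting for all of the conversions above: import_playbook is static, but the tags:, vars: and when: keywords attached to it are still honored and are applied to every play and task the imported file contains. A sketch, with a hypothetical guard variable:

- import_playbook: ../pre/gate_checks.yml
  tags:
  - pre_upgrade                                  # inherited by every imported task
  when: run_gate_checks | default(true) | bool   # hypothetical guard, also inherited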
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
index 9887f09f2..6016e6a78 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -10,4 +10,4 @@
name: openshift_gcp
- name: run the cluster deploy
- include: ../deploy_cluster.yml
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 3d6c79834..35407969e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -19,7 +19,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index c7a532622..be177b714 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -22,7 +22,7 @@
name: openshift_master
tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 834bd242d..9ddb4afe2 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -28,8 +28,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..107bbfff6 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
devices but you must specify the following variables in `[OSEv3:vars]`:
* `openshift_storage_glusterfs_is_missing=False`
* `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+   in the `nodes` group. They must also already be configured as OpenShift
+   nodes before this playbook runs.
By default, pods for a native GlusterFS cluster will be created in the
`default` namespace. To change this, specify
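A minimal sketch of the inventory shape the new README note asks for, written as a YAML inventory with hypothetical host names (the shipped hosts.glusterfs.native.example expresses the same layout in INI form):

all:
  children:
    nodes:
      hosts:
        node11.example.com:    # native GlusterFS hosts double as OpenShift nodes
        node12.example.com:
    glusterfs:
      hosts:
        node11.example.com:
        node12.example.com: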
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
index 65fb0abda..518a1d624 100644
--- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-registry-certificates.yml
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
index 8dc052751..a74dd8c79 100644
--- a/playbooks/openshift-hosted/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-router-certificates.yml
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 78fe663db..2636d857e 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,13 +11,6 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- name: Configure firewall load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
-
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index 8837a2d32..83d330284 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -1,9 +1,9 @@
---
#
# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
+# Hosted logging on. See inventory/hosts.example for the
# currently supported method.
#
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/config.yml
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index afb8d6bd1..9f6d5afcc 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -19,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -180,7 +179,6 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- role: openshift_hosted_facts
- role: openshift_clock
@@ -228,6 +226,8 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- include_tasks: tasks/wire_aggregator.yml
@@ -237,7 +237,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-master/private/redeploy-certificates.yml
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 59657574a..9f5502141 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -56,7 +56,7 @@
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
# Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ # This change will be reverted in playbooks/redeploy-certificates.yml
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
@@ -207,7 +207,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
@@ -272,7 +272,7 @@
state: absent
changed_when: false
-- include: ../../openshift-node/private/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 8229eccfa..007b23ea3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,11 +20,11 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index ecf8f15d9..4f55d5c82 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -180,13 +180,13 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-master/redeploy-certificates.yml
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
index 3ae74c7a0..27f4e6b7d 100644
--- a/playbooks/openshift-master/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-openshift-ca.yml
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml
index 6ea77e00b..3625efcc6 100644
--- a/playbooks/openshift-nfs/private/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -14,7 +14,6 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index dc5d7a57e..32b288c8b 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -10,7 +10,6 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
- role: tuned
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 5afa83be7..ef07669cb 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -12,7 +12,6 @@
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/openshift-node/private/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-node/private/redeploy-certificates.yml
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 41eb00f99..0786bd7d3 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -23,9 +23,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -40,7 +40,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..541913aef 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f567242cd..d361d6278 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -226,7 +226,7 @@ advanced configuration:
[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
[origin]: https://www.openshift.org/
[centos7]: https://www.centos.org/
-[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
[advanced-configuration]: ./advanced-configuration.md
[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index db2a13d38..403e0e1a7 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -343,7 +343,7 @@ installation for example by specifying the authentication.
The full list of options is available in this sample inventory:
-https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
Note, that in order to deploy OpenShift origin, you should update the following
variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`:
@@ -604,7 +604,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/
Once it succeeds, you can install openshift by running:
- ansible-playbook openshift-ansible/playbooks/byo/config.yml
+ ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
## Access UI
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 8ed01b192..3211f619a 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,4 @@
# some logic here?
- name: run the cluster deploy
- include: ../../deploy_cluster.yml
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 3e295b2c8..583e72b51 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -10,7 +10,7 @@
# NOTE(shadower): Bring in the host groups:
- name: evaluate groups
- include: ../../init/evaluate_groups.yml
+ import_playbook: ../../init/evaluate_groups.yml
- name: Wait for the nodes and gather their facts
@@ -27,7 +27,7 @@
setup:
- name: set common facts
- include: ../../init/facts.yml
+ import_playbook: ../../init/facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
index 5d88c105f..fc2854605 100644
--- a/playbooks/openstack/openshift-cluster/provision_install.yml
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -1,9 +1,9 @@
---
- name: Check the prerequisites for cluster provisioning in OpenStack
- include: prerequisites.yml
+ import_playbook: prerequisites.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster
- include: install.yml
+ import_playbook: install.yml
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 0cc5fcef8..7b7868cfe 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,4 +3,10 @@
vars:
skip_verison: True
+# This is required by the container runtime for CRI-O and only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
- import_playbook: container-runtime/private/config.yml
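This addition is the other half of the os_firewall removals scattered through the patch (the etcd, load balancer, NFS, node and master config plays): firewall configuration now happens exactly once, for every host group, during the prerequisites pass instead of being repeated by each component playbook. A sketch of the intended flow; in practice these are two separate ansible-playbook invocations against the same inventory:

- import_playbook: prerequisites.yml    # os_firewall runs here, once per cluster
- import_playbook: deploy_cluster.yml   # component playbooks no longer touch the firewall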
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
index 45135c10e..b5fcb951d 100644
--- a/playbooks/redeploy-certificates.yml
+++ b/playbooks/redeploy-certificates.yml
@@ -1,26 +1,26 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
-- include: openshift-etcd/private/redeploy-certificates.yml
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
-- include: openshift-master/private/redeploy-certificates.yml
+- import_playbook: openshift-master/private/redeploy-certificates.yml
-- include: openshift-node/private/redeploy-certificates.yml
+- import_playbook: openshift-node/private/redeploy-certificates.yml
-- include: openshift-etcd/private/restart.yml
+- import_playbook: openshift-etcd/private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
-- include: openshift-node/private/restart.yml
+- import_playbook: openshift-node/private/restart.yml
-- include: openshift-hosted/private/redeploy-router-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
when: openshift_hosted_manage_router | default(true) | bool
-- include: openshift-hosted/private/redeploy-registry-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
when: openshift_hosted_manage_registry | default(true) | bool
-- include: openshift-master/private/revert-client-ca.yml
+- import_playbook: openshift-master/private/revert-client-ca.yml
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index bd96965ac..d7eb8663f 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -59,6 +59,7 @@ docker_default_storage_path: /var/lib/docker
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
@@ -81,6 +82,7 @@ l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+
openshift_crio_image_tag_default: "latest"
l_crt_crio_image_tag_dict:
@@ -127,3 +129,5 @@ l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }
l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
+
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 5ea7df650..61f122f3c 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -4,7 +4,7 @@
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
+ - not l_is_node_system_container | bool
- include_tasks: common/pre.yml
diff --git a/roles/container_runtime/templates/daemon.json b/roles/container_runtime/templates/daemon.json
index 383963bd3..1a72d812a 100644
--- a/roles/container_runtime/templates/daemon.json
+++ b/roles/container_runtime/templates/daemon.json
@@ -5,10 +5,10 @@
"disable-legacy-registry": false,
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": {{ l_docker_insecure_registries }},
-{% if openshift_docker_log_driver is defined %}
+{% if openshift_docker_log_driver %}
"log-driver": "{{ openshift_docker_log_driver }}",
{%- endif %}
- "log-opts": {{ l_docker_log_options }},
+ "log-opts": {{ l_docker_log_options_dict }},
"runtimes": {
"oci": {
"path": "/usr/libexec/docker/docker-runc-current"
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
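This is one instance of the oadm retirement that runs through the patch (openshift_container_binary_sync below stops creating the oadm symlink and deletes stale copies): admin subcommands are reached through the client binary instead. The equivalence being relied on, as a comment sketch:

#   oadm <subcommand>  ->  oc adm <subcommand>
#   (client_binary is plain oc from the RPMs, /usr/local/bin/oc when containerized)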
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 80e4d391d..705d39f9a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -15,7 +15,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
until: not l_restart_node_result | failed
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index ffe814713..08f2d5adc 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -36,7 +36,7 @@
- name: Configure OpenShift node with disabled service proxy
lineinfile:
- dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
@@ -44,5 +44,5 @@
- name: force node restart to disable the proxy
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index cb83c8ead..7b55dda56 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
(not master_api_service_status_changed | default(false))
@@ -8,7 +8,7 @@
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index e68ae74bd..ede6f2125 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
+ systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index fdf01b7c2..88d62de49 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
-openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node
nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index e0b51eee0..612b6522d 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -4,11 +4,6 @@
role: builddefaults
# TODO: add ability to define builddefaults env vars sort of like this
# may need to move the config generation to a filter however.
- # openshift_env: "{{ hostvars
- # | oo_merge_hostvars(vars, inventory_hostname)
- # | oo_openshift_env }}"
- # openshift_env_structures:
- # - 'openshift.builddefaults.env.*'
local_facts:
http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 05e0a1352..eb00f13db 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,7 +9,7 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 140c6ea26..a90143aa3 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift.common.service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 80cb88d45..7b43d5adf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -28,7 +28,7 @@ Role Variables
| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
-| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift |
| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
@@ -46,15 +46,12 @@ Example Playbook
# Disable all excluders
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Enable all excluders
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Disable all excluders and verify appropriate excluder packages are available for upgrade
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index d4f151142..3a910e490 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -2,7 +2,7 @@
# keep the 'current' package or update to 'latest' if available?
r_openshift_excluder_package_state: present
r_openshift_excluder_docker_package_state: present
-
+r_openshift_excluder_service_type: "{{ openshift_service_type }}"
# Legacy variables are included for backwards compatibility with v3.5
# Inventory variables Legacy
# openshift_enable_excluders enable_excluders
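With this default in place, the r_openshift_excluder_service_type arguments deleted from the call sites earlier in the patch become redundant rather than broken: the role now derives the value itself, and the openshift_facts dependency added in the next hunk keeps that derivation resolvable. A minimal sketch of a call site after the change:

roles:
- role: openshift_excluder
  r_openshift_excluder_action: disable   # service type defaults from openshift_service_type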
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 871081c19..a9653edda 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_facts
- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index 93d6ef149..f0e87ba25 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -19,11 +19,6 @@
msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
when: r_openshift_excluder_action not in ['enable', 'disable']
- - name: Fail if r_openshift_excluder_service_type is not defined
- fail:
- msg: "r_openshift_excluder_service_type must be specified for this role"
- when: r_openshift_excluder_service_type is not defined
-
- name: Fail if r_openshift_excluder_upgrade_target is not defined
fail:
msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index a182d23c5..53a3bc87e 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -98,3 +98,9 @@ openshift_prometheus_alertbuffer_storage_create_pvc: False
openshift_router_selector: "region=infra"
openshift_hosted_router_selector: "{{ openshift_router_selector }}"
openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
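This mapping is the replacement for the removed common.service_type fact: a plain role default computed from openshift_deployment_type, duplicated into openshift_health_checker's defaults below so the checks also work when this role is not in play. A sketch of what it evaluates to:

# openshift_deployment_type: origin                ->  openshift_service_type: origin
# openshift_deployment_type: openshift-enterprise  ->  openshift_service_type: atomic-openshift
- debug:
    msg: "{{ openshift_service_type }}-node"   # e.g. atomic-openshift-node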
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index b371d347c..520c00340 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -94,8 +94,7 @@ def migrate_admission_plugin_facts(facts):
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
@@ -538,7 +537,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes common.service_type, master.registry_url, node.registry_url,
+ includes master.registry_url, node.registry_url,
node.storage_plugin_deps
Args:
@@ -550,14 +549,6 @@ def set_deployment_facts_if_unset(facts):
# disabled to avoid breaking up facts related to deployment type into
# multiple methods for now.
# pylint: disable=too-many-statements, too-many-branches
- if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'service_type' not in facts['common']:
- service_type = 'atomic-openshift'
- if deployment_type == 'origin':
- service_type = 'origin'
- facts['common']['service_type'] = service_type
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
@@ -854,7 +845,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
# If we've added items to the kubelet_args dict then we need
# to merge the new items back into the main facts object.
if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
+ facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
return facts
@@ -876,7 +867,7 @@ def build_controller_args(facts):
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
- facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
+ facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
@@ -898,7 +889,7 @@ def build_api_server_args(facts):
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
- facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
+ facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
@@ -1021,8 +1012,13 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
+ deployment_type = facts['common']['deployment_type']
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
+
for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
- env_path = filename % facts['common']['service_type']
+ env_path = filename % service_type
if not os.path.exists(env_path):
continue
@@ -1085,7 +1081,7 @@ def apply_provider_facts(facts, provider_facts):
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
-def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
@@ -1093,14 +1089,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
- protected_facts = ['ha']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
@@ -1132,14 +1125,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
- # Collect the subset of protected facts to overwrite
- # if key matches. These will be passed to the
- # subsequent merge_facts call.
- relevant_protected_facts = []
- for item in protected_facts_to_overwrite:
- if '.' in item and item.startswith(key + '.'):
- relevant_protected_facts.append(item)
- facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
@@ -1149,18 +1135,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
- # Key matches a protected fact and we are not overwriting
- # it so we will determine if it is okay to change this
- # fact.
- elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
- # ha (bool) can not change unless it has been passed
- # as a protected fact to overwrite.
- if key == 'ha':
- if safe_get_bool(value) != safe_get_bool(new[key]):
- # pylint: disable=line-too-long
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
- else:
- facts[key] = value
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
@@ -1433,7 +1407,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1494,8 +1467,6 @@ class OpenShiftFacts(object):
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Raises:
OpenShiftFactsUnsupportedRoleError:
@@ -1511,10 +1482,7 @@ class OpenShiftFacts(object):
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
def __init__(self, role, filename, local_facts,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -1536,34 +1504,23 @@ class OpenShiftFacts(object):
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
def generate_facts(self,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite):
+ additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated defaults
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift_env facts for overriding generated defaults
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: The generated facts
"""
+
local_facts = self.init_local_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
roles = local_facts.keys()
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
@@ -1581,8 +1538,7 @@ class OpenShiftFacts(object):
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
@@ -1627,7 +1583,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1732,65 +1688,17 @@ class OpenShiftFacts(object):
)
return provider_facts
- @staticmethod
- def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
- """ Split openshift_env facts based on openshift_env structures.
-
- Args:
- openshift_env_fact (string): the openshift_env fact to split
- ex: 'openshift_cloudprovider_openstack_auth_url'
- openshift_env_structures (list): a list of structures to determine fact keys
- ex: ['openshift.cloudprovider.openstack.*']
- Returns:
- list: a list of keys that represent the fact
- ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
- """
- # By default, we'll split an openshift_env fact by underscores.
- fact_keys = openshift_env_fact.split('_')
-
- # Determine if any of the provided variable structures match the fact.
- matching_structure = None
- if openshift_env_structures is not None:
- for structure in openshift_env_structures:
- if re.match(structure, openshift_env_fact):
- matching_structure = structure
- # Fact didn't match any variable structures so return the default fact keys.
- if matching_structure is None:
- return fact_keys
-
- final_keys = []
- structure_keys = matching_structure.split('.')
- for structure_key in structure_keys:
- # Matched current key. Add to final keys.
- if structure_key == fact_keys[structure_keys.index(structure_key)]:
- final_keys.append(structure_key)
- # Wildcard means we will be taking everything from here to the end of the fact.
- elif structure_key == '*':
- final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
- # Shouldn't have gotten here, return the fact keys.
- else:
- return fact_keys
- return final_keys
-
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
""" Initialize the local facts
Args:
facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift env facts to set
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
-
-
Returns:
dict: The result of merging the provided facts with existing
local facts
@@ -1802,45 +1710,13 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env is not None:
- for fact, value in iteritems(openshift_env):
- oo_env_facts = dict()
- current_level = oo_env_facts
- keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
-
- if len(keys) > 0 and keys[0] != self.role:
- continue
-
- # Build a dictionary from the split fact keys.
- # After this loop oo_env_facts is the resultant dictionary.
- # For example:
- # fact = "openshift_metrics_install_metrics"
- # value = 'true'
- # keys = ['metrics', 'install', 'metrics']
- # result = {'metrics': {'install': {'metrics': 'true'}}}
- for i, _ in enumerate(keys):
- # This is the last key. Set the value.
- if i == (len(keys) - 1):
- current_level[keys[i]] = value
- # This is a key other than the last key. Set as
- # dictionary and continue.
- else:
- current_level[keys[i]] = dict()
- current_level = current_level[keys[i]]
-
- facts_to_set = merge_facts(orig=facts_to_set,
- new=oo_env_facts,
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
-
local_facts = get_local_facts_from_file(self.filename)
migrated_facts = migrate_local_facts(local_facts)
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -1948,9 +1824,6 @@ def main():
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
- openshift_env=dict(default={}, type='dict', required=False),
- openshift_env_structures=dict(default=[], type='list', required=False),
- protected_facts_to_overwrite=dict(default=[], type='list', required=False)
),
supports_check_mode=True,
add_file_common_args=True,
@@ -1966,19 +1839,13 @@ def main():
role = module.params['role'] # noqa: F405
local_facts = module.params['local_facts'] # noqa: F405
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
- openshift_env = module.params['openshift_env'] # noqa: F405
- openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role,
fact_file,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
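After these removals the module's public surface is just role, local_facts and additive_facts_to_overwrite; a playbook still passing openshift_env, openshift_env_structures or protected_facts_to_overwrite would now fail argument validation. A minimal sketch of a valid invocation after the change:

- openshift_facts:
    role: common
    local_facts:
      deployment_type: "{{ openshift_deployment_type }}"   # illustrative local fact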
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 6c5662a4e..94961f2d4 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -12,7 +12,7 @@ Checks are typically implemented as two parts:
The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
-[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
+[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)
for an example.
The action plugin dynamically discovers all checks and executes only those
diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml
new file mode 100644
index 000000000..f25a0dc79
--- /dev/null
+++ b/roles/openshift_health_checker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 090e438ff..980e23f27 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
group_names = self.get_var("group_names", default=[])
packages = set()
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 13a91dadf..f3a628e28 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
openshift_release = self.get_var("openshift_release", default='')
deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index ec46c3b4b..fc333dfd4 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -8,12 +8,12 @@ def task_vars():
return dict(
openshift=dict(
common=dict(
- service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
+ openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index dd6f4ad81..a29dc166b 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
openshift=dict(
- common=dict(service_type="origin", is_containerized=False),
- )
+ common=dict(is_containerized=False),
+ ),
+ openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6f0457549..dd98ff4d8 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -10,10 +10,11 @@ def test_openshift_version_not_supported():
openshift_release = '111.7.0'
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -27,9 +28,10 @@ def test_invalid_openshift_release_format():
return {}
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_image_tag='v0',
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -47,9 +49,10 @@ def test_invalid_openshift_release_format():
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
+ openshift_service_type='origin'
)
return_value = {} # note: check.execute_module modifies return hash contents
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 9815acb38..a1e6e0879 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
- dict(openshift=dict(common=dict(service_type='openshift'))),
+ dict(openshift_service_type='origin'),
set(),
set(['openshift-master', 'openshift-node']),
),
(
dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-master']),
@@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
@@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 3cf4ce033..ea8e02b97 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
return dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type=deployment_type)),
+ openshift_service_type=service_type,
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type=deployment_type,
@@ -29,7 +32,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index de302c740..429f0c514 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -126,7 +126,7 @@
selector: "{{ openshift_hosted_registry_selector }}"
replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ penshift_hosted_registry_registryurl }}"
+ images: "{{ openshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
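As a sanity check, the new endpoints template can be rendered with plain `jinja2`; the endpoints name and node IPs below are made up. The v3.9 copies of the templates further down are identical.

```python
# Render the endpoints template with plain jinja2; name and IPs are made up.
import jinja2

TEMPLATE = (
    "---\n"
    "apiVersion: v1\n"
    "kind: Endpoints\n"
    "metadata:\n"
    "  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}\n"
    "subsets:\n"
    "- addresses:\n"
    "{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}"
    "  - ip: {{ ip }}\n"
    "{% endfor %}"
    "  ports:\n"
    "  - port: 1\n"
)

print(jinja2.Template(TEMPLATE).render(
    openshift_hosted_registry_storage_glusterfs_endpoints="glusterfs-registry",
    openshift_hosted_registry_storage_glusterfs_ips=["192.0.2.10", "192.0.2.11"],
))
```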
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0bfa9e85b..bf04094a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -9,6 +9,7 @@ metadata:
logging-infra: "{{logging_component}}"
spec:
replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 96de82669..974d9781a 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -164,14 +164,14 @@ away.
If you want to install CFME/MIQ at the same time you install your
OCP/Origin cluster, ensure that `openshift_management_install_management` is set
to `true` in your inventory. Call the standard
-`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ
installation.
If you are installing CFME/MIQ on an *already provisioned cluster*
then you can call the CFME/MIQ playbook directly:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml
```
*Note: Use `miq-template` in the following examples for ManageIQ installs*
@@ -489,7 +489,7 @@ This playbook will:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml
```
## Multiple Providers
@@ -567,7 +567,7 @@ the config file path.
```
$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
- playbooks/byo/openshift-management/add_many_container_providers.yml
+ playbooks/openshift-management/add_many_container_providers.yml
```
Afterwards you will find two new container providers in your
@@ -579,7 +579,7 @@ to see an overview.
This role includes a playbook to uninstall and erase the CFME/MIQ
installation:
-* `playbooks/byo/openshift-management/uninstall.yml`
+* `playbooks/openshift-management/uninstall.yml`
NFS export definitions and data stored on NFS exports are not
automatically removed. You are urged to manually erase any data from
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index e768961ce..b5e234b7f 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
openshift_management_username: admin
openshift_management_password: smartvm
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e6b8b8ac8..557bfe022 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,7 +1,7 @@
---
- name: restart master api
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
@@ -10,7 +10,7 @@
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 5f4e6df71..9be5508aa 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -16,7 +16,7 @@
- name: Install Master package
package:
- name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- not openshift.common.is_containerized | bool
@@ -141,7 +141,7 @@
# The template file will stomp any other settings made.
- block:
- name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
failed_when: false
changed_when: false
register: l_already_set
@@ -203,7 +203,7 @@
- name: Start and enable master api on first master
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -214,7 +214,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -230,7 +230,7 @@
- name: Start and enable master api all masters
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -241,7 +241,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -258,7 +258,7 @@
- name: Start and enable master controller service
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
enabled: yes
state: started
register: l_start_result
@@ -267,7 +267,7 @@
delay: 60
- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- l_start_result | failed
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index ca04d2243..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 4f8b758fd..715347101 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -1,7 +1,7 @@
---
- name: Restart master API
service:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when: openshift_master_ha | bool
- name: Wait for master API to come back online
@@ -14,7 +14,7 @@
when: openshift_master_ha | bool
- name: Restart master controllers
service:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: restarted
# Ignore errors since it is possible that type != simple for
# pre-3.1.1 installations.
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 450f6d803..f6c5ce0dd 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -8,12 +8,12 @@
- name: Check Master system container package
command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
+ atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
# HA
- name: Install or Update HA api master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
@@ -21,7 +21,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index ee76413e3..76b6f46aa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -13,7 +13,7 @@
- name: Disable the legacy master service if it exists
systemd:
- name: "{{ openshift.common.service_type }}-master"
+ name: "{{ openshift_service_type }}-master"
state: stopped
enabled: no
masked: yes
@@ -21,7 +21,7 @@
- name: Remove the legacy master service if it exists
file:
- path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service"
state: absent
ignore_errors: true
when:
@@ -40,7 +40,7 @@
- name: Create the ha systemd unit files
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
- dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+ dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service"
when:
- not l_is_master_system_container | bool
with_items:
@@ -55,7 +55,7 @@
- name: enable master services
systemd:
- name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ name: "{{ openshift_service_type }}-master-{{ item }}"
enabled: yes
with_items:
- api
@@ -64,13 +64,13 @@
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api
register: l_master_api_proxy
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api
register: master_api_aws
failed_when: false
changed_when: false
@@ -78,7 +78,7 @@
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
backup: true
notify:
- restart master api
@@ -89,7 +89,7 @@
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
@@ -98,19 +98,19 @@
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
no_log: True
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_proxy
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_aws
failed_when: false
changed_when: false
@@ -118,14 +118,14 @@
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
backup: true
notify:
- restart master controllers
- name: Restore Master Controllers Proxy Config Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
@@ -135,7 +135,7 @@
- name: Restore Master Controllers AWS Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index caab3045a..f50b91ff5 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
package: name={{ master_pkgs | join(',') }} state=present
vars:
master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result | success
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index cec3d3fb1..5e46d9121 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-api \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ --name {{ openshift_service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-api
+SyslogIdentifier={{ openshift_service_type }}-master-api
Restart=always
RestartSec=5s
[Install]
WantedBy={{ openshift_docker_service_name }}.service
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index a0248151d..899575f1a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -1,19 +1,19 @@
[Unit]
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
-Wants={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.common.service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
After={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-controllers \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ --name {{ openshift_service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 02bfd6f62..ed8a47df8 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=network-online.target
After=etcd.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
@@ -20,4 +20,4 @@ RestartSec=5s
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index fae021845..b36963f73 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -2,19 +2,19 @@
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
After=network-online.target
-After={{ openshift.common.service_type }}-master-api.service
-Wants={{ openshift.common.service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes, but we don't re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, just follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
```
> If you are using a version older than 1.5/3.5, you must replace `--drain` with `--evacuate`.
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f3867fe4a..fff927944 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 229c6bbed..170a3dc6e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
until: not l_openshift_node_restart_node_result | failed
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
index 38c2b794d..a7f1fc116 100644
--- a/roles/openshift_node/tasks/aws.yml
+++ b/roles/openshift_node/tasks/aws.yml
@@ -1,7 +1,7 @@
---
- name: Configure AWS Cloud Provider Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 741a2234f..e5c80bd09 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -38,7 +38,7 @@
- name: Configure Node Environment Variables
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
@@ -76,7 +76,7 @@
- name: Start and enable node dep
systemd:
daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
+ name: "{{ openshift_service_type }}-node-dep"
enabled: yes
state: started
@@ -84,7 +84,7 @@
block:
- name: Start and enable node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
enabled: yes
state: started
daemon_reload: yes
@@ -95,7 +95,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 527580481..ebc1426d3 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
index d60794305..7ddd319d2 100644
--- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Proxy Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
index ee91a88ab..9f1145d12 100644
--- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node dependencies docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service"
src: openshift.docker.node.dep.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..649fc5f6b 100644
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: openshift.docker.node.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index d743d2188..c13a6cf6c 100644
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - docker_upgrade_nuke_images
# - docker_version
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1ed4a05c1..f93aed246 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,14 +3,14 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
until: result | success
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d9f3e920d..32c5f495f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,6 +44,15 @@
- name: include node installer
include_tasks: install.yml
+- name: Restart cri-o
+ systemd:
+ name: cri-o
+ enabled: yes
+ state: restarted
+ when: openshift_use_crio
+ register: task_result
+ failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
+
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
name: NetworkManager
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 98a391890..98978ec6f 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -8,10 +8,10 @@
- name: Install or Update node system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 397e1ba18..c532147b1 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,7 +1,7 @@
---
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 561b56918..9f333645a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -17,7 +17,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
@@ -26,8 +26,8 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
- etcd_container
failed_when: false
when: openshift.common.is_containerized | bool
@@ -80,9 +80,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -91,7 +91,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 3f1abceab..65c301783 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
@@ -27,9 +27,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- name: Wait for master API to come back online
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index fcbe1a598..120b93bc3 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -1,13 +1,13 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - component
# - openshift_pkg_version
# - openshift.common.is_atomic
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
register: result
until: result | success
@@ -19,7 +19,7 @@
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 5964ac095..8b43beb07 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,11 +1,11 @@
[Unit]
Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
+PartOf={{ openshift_service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
+SyslogIdentifier={{ openshift_service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b33ca542..b174c7023 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,5 +1,5 @@
[Unit]
-After={{ openshift.common.service_type }}-master.service
+After={{ openshift_service_type }}-master.service
After={{ openshift_docker_service_name }}.service
After=openvswitch.service
PartOf={{ openshift_docker_service_name }}.service
@@ -10,20 +10,20 @@ PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
+Wants={{ openshift_service_type }}-master.service
+Requires={{ openshift_service_type }}-node-dep.service
+After={{ openshift_service_type }}-node-dep.service
Requires=dnsmasq.service
After=dnsmasq.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
-v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
@@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..0c2fcb2c5 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -23,7 +23,7 @@
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: glusterfs_wipe
+ when: "'openshift' in hostvars[item] and glusterfs_wipe"
- name: Delete pre-existing GlusterFS config
file:
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..354699637 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,10 @@
---
openshift_protect_installed_version: True
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+openshift_use_crio_only: False
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 5d7683120..d0ad4b7d2 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 4f9158ade..ae0f68a5b 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -101,13 +101,13 @@
when: is_containerized | bool
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
@@ -196,7 +196,7 @@
- openshift_version.startswith(openshift_release) | bool
msg: |-
You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
on host {{ inventory_hostname }}.
We will only install the latest RPMs, so please ensure you are getting the release
you expect. You may need to adjust your Ansible inventory, modify the repositories
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c40777bf1..c7ca5ceae 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -8,14 +8,14 @@
- openshift_version is not defined
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
diff --git a/roles/rhel_subscribe/README.md b/roles/rhel_subscribe/README.md
new file mode 100644
index 000000000..15eaf4f30
--- /dev/null
+++ b/roles/rhel_subscribe/README.md
@@ -0,0 +1,29 @@
+RHEL Subscribe
+==============
+
+Subscribes the RHEL servers and adds the OpenShift Enterprise repos.
+
+Role variables
+--------------
+
+### `rhsub_user`
+
+Username for the subscription-manager.
+
+### `rhsub_pass`
+
+Password for the subscription-manager.
+
+### `rhsub_pool`
+
+Name of the pool to attach (optional).
+
+### `rhsub_server`
+
+Custom hostname for the Satellite server (optional).
+
+### `openshift_release`
+
+Version for the OpenShift Enterprise repositories.
+
+Example: `3.6`
diff --git a/roles/rhel_subscribe/defaults/main.yml b/roles/rhel_subscribe/defaults/main.yml
new file mode 100644
index 000000000..80b2ab919
--- /dev/null
+++ b/roles/rhel_subscribe/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index fa74c9953..8acdfb969 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -1,25 +1,18 @@
---
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- set_fact:
- default_ose_version: '3.6'
- when: deployment_type == 'openshift-enterprise'
-
- set_fact:
- ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
-
-- fail:
- msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ openshift_release: "{{ openshift_release[1:] }}"
when:
- - deployment_type == 'openshift-enterprise'
- ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
+ - openshift_release is defined
+ - openshift_release[0] == 'v'
+
+- name: Disable all repositories
+ command: subscription-manager repos --disable="*"
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
--enable="rhel-7-fast-datapath-rpms"
register: subscribe_repos
until: subscribe_repos | succeeded
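A worked sketch of the new repo-name logic: strip a leading `v` from `openshift_release`, keep only the major.minor portion, and splice it into the repo id (the sample release values are illustrative):

```python
# Reproduce the Jinja expression used in the --enable line above.
def ose_repo(openshift_release):
    if openshift_release.startswith("v"):  # the new set_fact strips "v"
        openshift_release = openshift_release[1:]
    major_minor = ".".join(openshift_release.split(".")[0:2])
    return "rhel-7-server-ose-%s-rpms" % major_minor

print(ose_repo("v3.7.0"))  # rhel-7-server-ose-3.7-rpms
print(ose_repo("3.6"))     # rhel-7-server-ose-3.6-rpms
```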
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index f83cf9157..3466b7e44 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -3,23 +3,17 @@
# to make it able to attach to a pool
# to make it able to enable repositories
-- set_fact:
- rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
- rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
- rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
- rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
-
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
- when: rhel_subscription_user is not defined
+ msg: The rhsub_user variable is required for this role.
+ when: rhsub_user is not defined or not rhsub_user
- fail:
- msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
- when: rhel_subscription_pass is not defined
+ msg: The rhsub_pass variable is required for this role.
+ when: rhsub_pass is not defined or not rhsub_pass
- name: Detecting Atomic Host Operating System
stat:
@@ -27,10 +21,10 @@
register: ostree_booted
- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
args:
creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhel_subscription_server is defined and rhel_subscription_server
+ when: rhsub_server is defined and rhsub_server
- name: Install Red Hat Subscription manager
yum:
@@ -41,26 +35,26 @@
- name: RedHat subscriptions
redhat_subscription:
- username: "{{ rhel_subscription_user }}"
- password: "{{ rhel_subscription_pass }}"
+ username: "{{ rhsub_user }}"
+ password: "{{ rhsub_pass }}"
register: rh_subscription
until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_id
until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_attached
until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
- fail:
- msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available or consumed pools"
+ msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
@@ -71,5 +65,4 @@
- include_tasks: enterprise.yml
when:
- - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool
diff --git a/setup.py b/setup.py
index 5bf48b5ad..5ba050b83 100644
--- a/setup.py
+++ b/setup.py
@@ -334,9 +334,9 @@ class OpenShiftAnsibleSyntaxCheck(Command):
result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
has_errors = result or has_errors
- # TODO (rteague): This test will be enabled once we move to Ansible 2.4
- # result = self.deprecate_include(yaml_contents, yaml_file)
- # has_errors = result or has_errors
+    # Check for usage of the deprecated include: directive
+ result = self.deprecate_include(yaml_contents, yaml_file)
+ has_errors = result or has_errors
if not has_errors:
print('...PASSED')
@@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command):
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- # Error on any entry points in 'common'
- if 'common' in playbook:
- print('{}Invalid entry point playbook. All playbooks must'
- ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
- has_errors = True
# --syntax-check each entry point playbook
- else:
- try:
- # Create a host group list to avoid WARNING on unmatched host patterns
- host_group_list = [
- 'etcd,masters,nodes,OSEv3',
- 'oo_all_hosts',
- 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
- 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
- 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
- 'oo_nodes_to_config,oo_nodes_to_upgrade',
- 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
- 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
- 'oo_lb_to_config',
- 'oo_nfs_to_config',
- 'glusterfs,glusterfs_registry,']
- subprocess.check_output(
- ['ansible-playbook', '-i ' + ','.join(host_group_list),
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
- has_errors = True
+ try:
+ # Create a host group list to avoid WARNING on unmatched host patterns
+ host_group_list = [
+ 'etcd,masters,nodes,OSEv3',
+ 'oo_all_hosts',
+ 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
+ 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
+ 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
+ 'oo_nodes_to_config,oo_nodes_to_upgrade',
+ 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
+ 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
+ 'oo_lb_to_config',
+ 'oo_nfs_to_config',
+ 'glusterfs,glusterfs_registry,']
+ subprocess.check_output(
+ ['ansible-playbook', '-i ' + ','.join(host_group_list),
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
if has_errors:
raise SystemExit(1)
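With the TODO resolved, the syntax check now errors on any remaining bare include: directive. In Ansible 2.4 terms the replacement depends on context, as the converted test playbooks below illustrate; here is the pattern in miniature (file names are placeholders):

```yaml
# Flagged by the re-enabled deprecate_include check:
#   - include: other_playbook.yml

# Replacement at playbook level is the static import_playbook...
- import_playbook: other_playbook.yml

# ...and within a play's tasks, the dynamic include_tasks.
- hosts: all
  tasks:
    - include_tasks: some_tasks.yml
```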
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
index 74a55fa51..74a55fa51 100755..100644
--- a/test/integration/build-images.sh
+++ b/test/integration/build-images.sh
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
index 9875de9aa..006a71bd9 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
index 16ff41673..b4f18e3b5 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
index 9f3aad7bd..7998023ae 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- action: openshift_health_check
@@ -22,4 +22,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
index 84e9360f5..3b8b15ff3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Break the break-yum repo
@@ -29,4 +29,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
index f4c1bedfa..269c0250b 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -19,4 +19,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
index 409057792..92408a669 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Remove the local repo entirely
@@ -25,4 +25,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index d88f82a4a..4e2b8a50c 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
index 401ad1e21..e1f8d74e6 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -16,14 +16,14 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
# test with wrong repo enabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
- action: openshift_health_check
args:
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
index 88613802b..600bbe9c3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,11 +14,11 @@
- block:
# enable repo with extra minor version available
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -26,4 +26,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
index da3f6b844..079ca4253 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index e3459b376..0f0f8d366 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -46,16 +46,15 @@
- hosts: all
tasks:
-
# run before openshift_version to prevent it breaking
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2" }
-- include: ../../../playbooks/init/main.yml
+- import_playbook: ../../../playbooks/init/main.yml
- hosts: all
tasks:
# put it back like it was for the tests
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", enabled: False }
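All of these tests drive the same enable_repo.yml helper with repo_name/repo_file/repo_enabled vars, but the helper itself is not part of this diff. A plausible minimal implementation, assuming it merely toggles the enabled flag in the corresponding yum repo file, might be the sketch below (note that the final call above passes enabled: rather than repo_enabled:, which a sketch like this would silently ignore):

```yaml
# Hypothetical sketch of preflight/playbooks/tasks/enable_repo.yml;
# the real helper is not shown in this diff.
- name: Set enabled={{ repo_enabled | default(1) }} on repo {{ repo_name }}
  ini_file:
    dest: "/etc/yum.repos.d/{{ repo_file | default(repo_name) }}.repo"
    section: "{{ repo_name }}"
    option: enabled
    value: "{{ repo_enabled | default(1) }}"
```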
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
index 680b64602..680b64602 100755..100644
--- a/test/integration/run-tests.sh
+++ b/test/integration/run-tests.sh
diff --git a/tox.ini b/tox.ini
index 46738cae5..9c35915d5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,6 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
- integration
skipsdist=True
skip_missing_interpreters=True
@@ -14,7 +13,6 @@ deps =
-rtest-requirements.txt
unit: -eutils
py35-flake8: flake8-bugbear==17.3.0
- integration: docker-py==1.10.6
commands =
unit: pytest {posargs}
@@ -23,4 +21,3 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
ansible_syntax: python setup.py ansible_syntax
- integration: python -c 'print("run test/integration/run-tests.sh")'