Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_instance.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_sec_group.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_ssh_keypair.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_vpc.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/seal_ami.yml | 2
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 7
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 15
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 8
-rw-r--r--  playbooks/deploy_cluster.yml | 3
-rw-r--r--  playbooks/gcp/provision.yml | 2
-rw-r--r--  playbooks/init/base_packages.yml | 37
-rw-r--r--  playbooks/init/evaluate_groups.yml | 1
-rw-r--r--  playbooks/init/facts.yml | 74
-rw-r--r--  playbooks/init/main.yml | 9
-rw-r--r--  playbooks/init/repos.yml | 6
-rw-r--r--  playbooks/init/sanity_checks.yml | 60
-rw-r--r--  playbooks/init/version.yml | 32
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 30
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 16
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml | 14
-rw-r--r--  playbooks/openshift-etcd/private/restart.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml | 3
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_step.yml | 4
-rw-r--r--  playbooks/openshift-glusterfs/README.md | 2
-rw-r--r--  playbooks/openshift-glusterfs/private/config.yml | 10
-rw-r--r--  playbooks/openshift-hosted/private/config.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/install_docker_gc.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_registry.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml | 13
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_router.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml | 26
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml | 2
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 3
-rw-r--r--  playbooks/openshift-management/add_many_container_providers.yml | 2
-rw-r--r--  playbooks/openshift-management/private/add_container_provider.yml | 2
-rw-r--r--  playbooks/openshift-management/private/config.yml | 2
-rw-r--r--  playbooks/openshift-management/private/uninstall.yml | 2
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 2
-rw-r--r--  playbooks/openshift-master/private/certificates-backup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/config.yml | 9
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 6
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_hosts.yml | 1
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_services.yml | 2
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml | 2
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-metrics/private/config.yml | 3
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml | 14
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/image_prep.yml | 7
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 1
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-web-console/config.yml | 4
-rw-r--r--  playbooks/openshift-web-console/private/config.yml | 31
l---------  playbooks/openshift-web-console/private/roles | 1
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 120
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 3
-rw-r--r--  playbooks/openstack/openshift-cluster/prerequisites.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 12
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 5
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/all.yml | 5
-rwxr-xr-x  playbooks/openstack/sample-inventory/inventory.py | 20
-rw-r--r--  playbooks/prerequisites.yml | 5
94 files changed, 549 insertions, 308 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 69b2541bb..faeb332ad 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -10,7 +10,7 @@
- set_fact:
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
tasks:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
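(The change above is the dominant pattern in this commit: include_role is swapped for import_role throughout. In Ansible, import_role is resolved statically when the playbook is parsed, while include_role is evaluated dynamically at runtime. A minimal sketch of the two forms, with a hypothetical role name:

- hosts: all
  tasks:
    # Static import: processed at parse time; a `when:` here is copied onto
    # every task inside the role, and the tasks show up in --list-tasks.
    - import_role:
        name: example_role
        tasks_from: setup.yml

    # Dynamic include: evaluated only when this task runs, so the role name
    # can be a runtime variable or the task can sit inside a loop.
    - include_role:
        name: "{{ runtime_selected_role }}"

Static imports keep the task list predictable, which is why these playbooks prefer import_role wherever the role name is fixed.)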
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index b03fb0b7f..a3fc82f9a 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -2,7 +2,7 @@
- name: Setup the master node group
hosts: localhost
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: setup_master_group.yml
@@ -11,7 +11,7 @@
gather_facts: no
remote_user: root
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: master_facts.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 4b5bd22ea..7dde60b7d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -12,6 +12,6 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: provision cluster
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
index 6e843453c..6c7c1f069 100644
--- a/playbooks/aws/openshift-cluster/provision_instance.yml
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -7,6 +7,6 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 44c686e08..82f147865 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -13,6 +13,6 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: create the node groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
index 7d74a691a..a0d4ec728 100644
--- a/playbooks/aws/openshift-cluster/provision_sec_group.yml
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -7,7 +7,7 @@
gather_facts: no
tasks:
- name: create security groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: security_group.yml
when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
index 3ec683958..d86ff9f9b 100644
--- a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: ssh_keys.yml
vars:
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
index 0a23a6d32..cf72f6c87 100644
--- a/playbooks/aws/openshift-cluster/provision_vpc.yml
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create a vpc
- include_role:
+ import_role:
name: openshift_aws
tasks_from: vpc.yml
when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
index 8239a64fb..f315db604 100644
--- a/playbooks/aws/openshift-cluster/seal_ami.yml
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -7,6 +7,6 @@
become: no
tasks:
- name: seal the ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index 1491fb868..f6b1a6b5d 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -46,7 +46,7 @@ openshift_pkg_version: # -3.7.0
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
# vpc + subnet.
-#openshift_aws_subnet_name:
+#openshift_aws_subnet_az:
# -------------- #
# Security Group #
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
# --------- #
# Variables in this section apply to building a node AMI for use in your
# openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified
+# The current storage setup will require a drive if using a separate storage device
+# for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
# must specify a base_ami when building an AMI
openshift_aws_base_ami: # ami-12345678
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index dc9d0a139..f70f05bac 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -6,7 +6,7 @@
roles:
- role: rhel_subscribe
when:
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- ansible_distribution == "RedHat"
- rhsub_user is defined
- rhsub_pass is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 372a39e74..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
- name: Create local temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
changed_when: false
when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+ - name: Chmod local temp directory
+ local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
- name: Create service signer certificate
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Create remote temp directory for creating certs
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -65,7 +71,6 @@
- name: Delete local temp directory
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Delete local temp directory
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 5b8746f2a..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -12,14 +12,11 @@
roles:
- openshift_facts
tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift_is_atomic | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -54,13 +51,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
until: not (l_docker_upgrade_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_docker_upgrade_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
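(The drain hunk above, and the matching ones in upgrade_control_plane.yml and upgrade_nodes.yml below, replace a fixed 60-retry/60-second loop with a timeout-aware pattern: when openshift_upgrade_nodes_drain_timeout is unset or 0, a failed drain is retried and a persistent failure aborts the play; when a timeout is given, oc adm drain enforces it via --timeout and a timed-out drain is tolerated. A condensed sketch of the pattern, with a placeholder node and variable name:

- name: Drain node for upgrade (sketch)
  command: oc adm drain mynode --timeout={{ drain_timeout | default(0) }}s
  register: drain_result
  until: not (drain_result is failed)
  # Retry only in the unlimited case; with a timeout, oc bounds the wait itself.
  retries: "{{ 1 if (drain_timeout | default(0) | int) == 0 else 0 }}"
  delay: 5
  # Only treat a drain failure as fatal when no timeout was requested.
  failed_when:
    - drain_result is failed
    - drain_timeout | default(0) | int == 0
)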
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 1b57521df..f790fd98d 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,13 @@
---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+ hosts: oo_first_master
+ roles:
+ - role: openshift_web_console
+ when: openshift_web_console_install | default(true) | bool
+
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index de74c8ab8..da63450b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -1,4 +1,6 @@
---
+# for control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
@@ -48,6 +50,8 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+ # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
@@ -72,6 +76,6 @@
- name: Verify docker upgrade targets
hosts: "{{ l_upgrade_docker_target_hosts }}"
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 4713f8633..693ab2d96 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -5,11 +5,6 @@
hosts: oo_first_master
gather_facts: no
tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 95c37c38c..4c1156f4b 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -5,7 +5,7 @@
when: openshift.common.version is not defined
- name: Update oreg_auth docker login credentials if necessary
- include_role:
+ import_role:
name: container_runtime
tasks_from: registry_auth.yml
when: oreg_auth_user is defined
@@ -49,5 +49,5 @@
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- - deployment_type == 'origin'
+ - openshift_deployment_type == 'origin'
- openshift.common.version is version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0263e721d..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,6 +22,8 @@
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade all storage
command: >
@@ -49,10 +51,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
+ roles:
+ - openshift_facts
tasks:
- - include_role:
- name: openshift_facts
-
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
@@ -60,7 +61,7 @@
- include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: upgrade.yml
@@ -108,7 +109,6 @@
- name: Gate on master update
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
master_update_completed: "{{ hostvars
@@ -128,6 +128,7 @@
hosts: oo_masters_to_config
roles:
- { role: openshift_cli }
+ - { role: openshift_facts }
vars:
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
@@ -242,7 +243,6 @@
- name: Gate on reconcile
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
@@ -291,21 +291,25 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not (l_upgrade_control_plane_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_control_plane_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
roles:
- openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index ece69a3d5..850442b3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,11 +4,9 @@
roles:
- role: openshift_facts
tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade_pre.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
@@ -35,19 +33,23 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -62,7 +64,7 @@
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
tasks:
- - include_role:
+ - import_role:
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index a90082760..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
hosts: localhost
tasks:
- name: build upgrade scale groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: upgrade_node_group.yml
@@ -50,17 +50,17 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
delay: 5
failed_when:
- l_upgrade_nodes_drain_result is failed
- - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
# Alright, let's clean up!
- name: clean up the old scale group
hosts: localhost
tasks:
- name: clean up scale group
- include_role:
+ import_role:
name: openshift_aws
tasks_from: remove_scale_group.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index a5ad3801d..d520c6aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -13,7 +13,7 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1498db4c5..eb5f07ae0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,16 +14,21 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 6958652d8..4febe76ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -15,7 +15,7 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1750148d4..8d42e4c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
openshift_upgrade_min: '3.6'
- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+ - { role: openshift_facts }
tasks:
- name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 08bfd239f..a2f316c25 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
openshift_upgrade_min: '3.7'
- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..552bea5e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -41,13 +41,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 05aa737c6..ef9871008 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
openshift_upgrade_min: '3.7'
- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
new file mode 100644
index 000000000..7fd60743c
--- /dev/null
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+ hosts: oo_all_hosts:!oo_nodes_to_config
+ tasks:
+ - group_by:
+ key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
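(This new playbook relies on group_by, which evaluates its key per host and adds each host to the resulting group at runtime: hosts with containerized=True land in oo_hosts_containerized_managed_true, everything else in oo_hosts_containerized_managed_false. Later plays, such as config.yml below, then target the union; roughly:

- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
  tasks:
    - debug:
        msg: "runs on all config nodes plus containerized-managed hosts"
)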
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 67445edeb..7a49adcf0 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,26 +1,23 @@
---
-- hosts: "{{ l_containerized_host_groups }}"
- vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
- l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
- # role: container_runtime is necessary here to bring role default variables
- # into the play scope.
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
roles:
- role: container_runtime
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: package_docker.yml
when:
- not openshift_docker_use_system_container | bool
- not openshift_use_crio_only | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: systemcontainer_docker.yml
when:
- openshift_docker_use_system_container | bool
- not openshift_use_crio_only | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: systemcontainer_crio.yml
when:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 97226d6b2..a6d396270 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,14 +1,16 @@
---
-- hosts: "{{ l_containerized_host_groups }}"
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+ l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
# role: container_runtime is necessary here to bring role default variables
# into the play scope.
roles:
- role: container_runtime
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: docker_storage_setup_overlay.yml
when:
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 0e6bde09a..5efdc486a 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -22,6 +22,9 @@
- import_playbook: openshift-hosted/private/config.yml
+- import_playbook: openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
- import_playbook: openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
index 6016e6a78..b6edf9961 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -6,7 +6,7 @@
tasks:
- name: provision a GCP cluster in the specified project
- include_role:
+ import_role:
name: openshift_gcp
- name: run the cluster deploy
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
new file mode 100644
index 000000000..15b3dd492
--- /dev/null
+++ b/playbooks/init/base_packages.yml
@@ -0,0 +1,37 @@
+---
+- name: Install packages necessary for installer
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ tasks:
+ - when:
+ - not openshift_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - yum-utils
+ register: result
+ until: result is succeeded
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+ register: result
+ until: result is succeeded
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8087f6ffc..c4cd226c9 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -2,7 +2,6 @@
- name: Populate config host groups
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Load group name mapping variables
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index ac4429b23..8e4206948 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -5,7 +5,9 @@
tasks:
- name: Initialize host facts
- hosts: oo_all_hosts
+ # l_upgrade_non_node_hosts is passed in via play during control-plane-only
+ # upgrades; otherwise oo_all_hosts is used.
+ hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
tasks:
- name: load openshift_facts module
import_role:
@@ -13,7 +15,7 @@
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
- include_role:
+ import_role:
name: openshift_sanitize_inventory
- name: Detecting Operating System from ostree_booted
@@ -21,6 +23,14 @@
path: /run/ostree-booted
register: ostree_booted
+ # TODO(michaelgugino) remove this line once CI is updated.
+ - name: set openshift_deployment_type if unset
+ set_fact:
+ openshift_deployment_type: "{{ deployment_type }}"
+ when:
+ - openshift_deployment_type is undefined
+ - deployment_type is defined
+
- name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
set_fact:
openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
@@ -28,26 +38,6 @@
# TODO: Should this be moved into health checks??
# Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist is fedora and python is v3
- fail:
- msg: |
- openshift-ansible requires Python 3 for {{ ansible_distribution }};
- For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when:
- - ansible_distribution == 'Fedora'
- - ansible_python['version']['major'] != 3
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist not Fedora and python must be v2
- fail:
- msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when:
- - ansible_distribution != 'Fedora'
- - ansible_python['version']['major'] != 2
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
# Fail as early as possible if Atomic and old version of Docker
- when:
- openshift_is_atomic | bool
@@ -68,39 +58,6 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - when:
- - not openshift_is_atomic | bool
- block:
- - name: Ensure openshift-ansible installer package deps are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - yum-utils
- register: result
- until: result is succeeded
-
- - name: Ensure various deps for running system containers are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - atomic
- - ostree
- - runc
- when:
- - >
- (openshift_use_system_containers | default(False)) | bool
- or (openshift_use_etcd_system_container | default(False)) | bool
- or (openshift_use_openvswitch_system_container | default(False)) | bool
- or (openshift_use_node_system_container | default(False)) | bool
- or (openshift_use_master_system_container | default(False)) | bool
- register: result
- until: result is succeeded
-
- name: Gather Cluster facts
openshift_facts:
role: common
@@ -136,11 +93,6 @@
local_facts:
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- - name: initialize_facts set_fact repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
- repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
-
- name: Initialize special first-master variables
hosts: oo_first_master
roles:
@@ -150,3 +102,5 @@
# We need to setup openshift_client_binary here for special uses of delegate_to in
# later roles and plays.
first_master_client_binary: "{{ openshift_client_binary }}"
+ # Some roles may require this to be set for the first master
+ openshift_client_binary: "{{ openshift_client_binary }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 06e8ba504..8a3f4682d 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -17,15 +17,12 @@
- import_playbook: facts.yml
-- import_playbook: sanity_checks.yml
- when: not (skip_sanity_checks | default(False))
-
-- import_playbook: validate_hostnames.yml
- when: not (skip_validate_hostnames | default(False))
-
- import_playbook: version.yml
when: not (skip_verison | default(False))
+- import_playbook: sanity_checks.yml
+ when: not (skip_sanity_checks | default(False))
+
- name: Initialization Checkpoint End
hosts: all
gather_facts: false
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 66786a41a..667f38ddd 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -4,13 +4,13 @@
gather_facts: no
tasks:
- name: subscribe instances to Red Hat Subscription Manager
- include_role:
+ import_role:
name: rhel_subscribe
when:
- ansible_distribution == 'RedHat'
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- rhsub_user is defined
- rhsub_pass is defined
- name: initialize openshift repos
- include_role:
+ import_role:
name: openshift_repos
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 26716a92d..52bcf42c0 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,51 +1,15 @@
---
- name: Verify Requirements
- hosts: oo_all_hosts
+ hosts: oo_first_master
+ roles:
+ - role: lib_utils
tasks:
- - fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
- - fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
- - fail:
- msg: openshift_public_hostname must be 63 characters or less
- when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
+ # sanity_checks is a custom action plugin defined in lib_utils.
+ # This module will loop through all the hostvars for each host
+ # specified in check_hosts.
+ # Since sanity_checks is an action_plugin, it executes on the control host.
+ # Thus, sanity_checks cannot gather new information about any hosts.
+ - name: Run variable sanity checks
+ sanity_checks:
+ check_hosts: "{{ groups['oo_all_hosts'] }}"
+ run_once: True
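(As the comments note, sanity_checks runs on the control host as an action plugin, so a single task can validate the inventory variables of every host listed in check_hosts without connecting to any of them. A usage sketch against a smaller, hypothetical subset:

- name: Run variable sanity checks on etcd hosts only
  sanity_checks:
    check_hosts: "{{ groups['oo_etcd_to_config'] | default([]) }}"
  run_once: True
)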
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
index 37a5284d5..962ee7220 100644
--- a/playbooks/init/version.yml
+++ b/playbooks/init/version.yml
@@ -2,20 +2,32 @@
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
- roles:
- - openshift_version
+ tasks:
+ - include_role:
+ name: openshift_version
+ tasks_from: first_master.yml
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
- name: Set openshift_version for etcd, node, and master hosts
- hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
+ hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
vars:
- openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- pre_tasks:
+ l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
+ l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
+ l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
+ tasks:
- set_fact:
- openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
- roles:
- - openshift_version
+ openshift_version: "{{ l_first_master_openshift_version }}"
+ openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
+ openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"
+
+# NOTE: These steps should only be run against masters and nodes.
+- name: Ensure the requested version packages are available.
+ hosts: "{{ l_openshift_version_check_hosts | default('oo_nodes_to_config:oo_masters_to_config:!oo_first_master') }}"
+ tasks:
+ - include_role:
+ name: openshift_version
+ tasks_from: masters_and_nodes.yml
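(The rewritten version.yml pins version facts on every other host by reading them out of the first master's hostvars. The same fan-out pattern works for any fact that must be computed once and then copied cluster-wide; a generic sketch with a hypothetical fact name:

- hosts: oo_all_hosts:!oo_first_master
  vars:
    l_first_master_value: "{{ hostvars[groups.oo_first_master.0].some_computed_fact }}"
  tasks:
    - set_fact:
        some_computed_fact: "{{ l_first_master_value }}"
)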
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index f3bb3c2d1..72c39d546 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -5,7 +5,7 @@
- role: openshift_clock
- role: openshift_etcd_facts
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: ca.yml
vars:
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index ce21a1f96..2f9bef799 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -3,10 +3,10 @@
hosts: oo_first_etcd
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_generated_certificates.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: remove_generated_certificates.yml
@@ -14,6 +14,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index be177b714..674bd5088 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -18,7 +18,7 @@
- role: openshift_facts
tasks:
- name: Check the master API is ready
- include_role:
+ import_role:
name: openshift_master
tasks_from: check_master_api_is_ready.yml
- set_fact:
@@ -31,8 +31,8 @@
name: "{{ master_service }}"
state: stopped
# 2. backup embedded etcd
- # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- - include_role:
+ # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -40,7 +40,7 @@
r_etcd_common_embedded_etcd: "{{ true }}"
r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.archive.yml
vars:
@@ -56,7 +56,7 @@
- name: Backup etcd client certificates for master host
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_master_etcd_certificates.yml
@@ -73,10 +73,10 @@
hosts: oo_etcd_to_config[0]
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: disable_etcd.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: clean_data.yml
@@ -89,9 +89,12 @@
local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
register: g_etcd_client_mktemp
changed_when: False
- become: no
- - include_role:
+ - name: Chmod local temp directory for syncing etcd backup
+ local_action: command chmod 777 "{{ g_etcd_client_mktemp.stdout }}"
+ changed_when: False
+
+ - import_role:
name: etcd
tasks_from: backup.fetch.yml
vars:
@@ -101,7 +104,7 @@
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
delegate_to: "{{ groups.oo_first_master[0] }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.copy.yml
vars:
@@ -116,20 +119,19 @@
- name: Delete temporary directory
local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
changed_when: False
- become: no
# 7. force new cluster from the backup
- name: Force new etcd cluster
hosts: oo_etcd_to_config[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.unarchive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.force_new_cluster.yml
vars:
@@ -143,7 +145,7 @@
- name: Configure master to use external etcd
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: configure_external_etcd.yml
vars:
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index cad0ebcaa..3f8b44032 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -2,7 +2,6 @@
- name: Check if the master has embedded etcd
hosts: localhost
connection: local
- become: no
gather_facts: no
tags:
- always
@@ -15,7 +14,7 @@
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.pre_check.yml
vars:
@@ -43,7 +42,7 @@
roles:
- role: openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -53,7 +52,6 @@
- name: Gate on etcd backup
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
@@ -70,7 +68,7 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: disable_etcd.yml
@@ -78,7 +76,7 @@
hosts: oo_etcd_to_migrate[0]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.yml
vars:
@@ -90,7 +88,7 @@
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: clean_data.yml
vars:
@@ -126,7 +124,7 @@
- name: Add TTLs on the first master
hosts: oo_first_master[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.add_ttls.yml
vars:
@@ -138,7 +136,7 @@
- name: Configure masters if etcd data migration is succesfull
hosts: oo_masters_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 0995945cc..a3acf6945 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -14,10 +14,10 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_ca_certificates.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: remove_ca_certificates.yml
@@ -26,7 +26,6 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -34,10 +33,14 @@
register: g_etcd_mktemp
changed_when: false
+ - name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}"
+ changed_when: false
+
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: distribute_ca.yml
vars:
@@ -54,7 +57,7 @@
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: retrieve_ca_certificates.yml
vars:
@@ -74,7 +77,6 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file:
diff --git a/playbooks/openshift-etcd/private/restart.yml b/playbooks/openshift-etcd/private/restart.yml
index 0751480e2..a2a53651b 100644
--- a/playbooks/openshift-etcd/private/restart.yml
+++ b/playbooks/openshift-etcd/private/restart.yml
@@ -3,7 +3,7 @@
hosts: oo_etcd_to_config
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: restart.yml
when:
@@ -12,7 +12,7 @@
- name: Restart etcd
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: restart.yml
when:
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index dc667958f..8a9811a25 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -30,7 +30,7 @@
retries: 3
delay: 10
until: etcd_add_check.rc == 0
- - include_role:
+ - import_role:
name: etcd
tasks_from: server_certificates.yml
vars:
@@ -76,6 +76,6 @@
roles:
- role: openshift_master_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: update_etcd_client_urls.yml
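
The scaleup hunk keeps the register/retries/until idiom around the member add, which re-runs a failing command instead of aborting the play. The shape of the pattern, with a hypothetical etcdctl invocation:

```yaml
- name: Add the new member to the etcd cluster
  command: >
    etcdctl member add {{ etcd_hostname }} {{ etcd_peer_url }}
  register: etcd_add_check
  retries: 3
  delay: 10
  until: etcd_add_check.rc == 0
```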
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 695b53990..ebcf4a5ff 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -5,7 +5,7 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: server_certificates.yml
vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 0d8943d93..081c024fc 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -4,7 +4,7 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -14,7 +14,6 @@
- name: Gate on etcd backup
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index d4386249e..f9e50e748 100644
--- a/playbooks/openshift-etcd/private/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -6,7 +6,7 @@
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_image.yml
vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index e373a4a4c..8997680f9 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -14,7 +14,7 @@
- name: Drop etcdctl profiles
hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: drop_etcdctl.yml
diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index f7fe6cd9c..e78cc5826 100644
--- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -6,7 +6,7 @@
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_rpm.yml
vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index 05c543d62..6aec838d4 100644
--- a/playbooks/openshift-etcd/private/upgrade_step.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -2,7 +2,7 @@
- name: Determine etcd version
hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: version_detect.yml
@@ -54,7 +54,7 @@
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_image.yml
vars:
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index 107bbfff6..19c381490 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -63,7 +63,7 @@ glusterfs
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
[masters]
master
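
The same rename applies to any inventory still carrying the old key. In group_vars YAML form (value illustrative; these playbooks use "origin" and "openshift-enterprise"):

```yaml
# group_vars/OSEv3.yml: deployment_type is deprecated in favor of this
openshift_deployment_type: origin
```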
diff --git a/playbooks/openshift-glusterfs/private/config.yml b/playbooks/openshift-glusterfs/private/config.yml
index 19e14ab3e..9a5bc143d 100644
--- a/playbooks/openshift-glusterfs/private/config.yml
+++ b/playbooks/openshift-glusterfs/private/config.yml
@@ -14,12 +14,12 @@
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: kernel_modules.yml
when:
@@ -28,12 +28,12 @@
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: kernel_modules.yml
when:
@@ -43,7 +43,7 @@
hosts: oo_first_master
tasks:
- name: setup glusterfs
- include_role:
+ import_role:
name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/openshift-hosted/private/config.yml b/playbooks/openshift-hosted/private/config.yml
index 036fe654d..4e7b98da2 100644
--- a/playbooks/openshift-hosted/private/config.yml
+++ b/playbooks/openshift-hosted/private/config.yml
@@ -21,6 +21,10 @@
- import_playbook: openshift_hosted_registry.yml
+- import_playbook: openshift_hosted_wait_for_pods.yml
+
+- import_playbook: openshift_hosted_registry_storage.yml
+
- import_playbook: cockpit-ui.yml
- import_playbook: install_docker_gc.yml
diff --git a/playbooks/openshift-hosted/private/install_docker_gc.yml b/playbooks/openshift-hosted/private/install_docker_gc.yml
index 1e3dfee07..03eb542d3 100644
--- a/playbooks/openshift-hosted/private/install_docker_gc.yml
+++ b/playbooks/openshift-hosted/private/install_docker_gc.yml
@@ -3,5 +3,5 @@
hosts: oo_first_master
gather_facts: false
tasks:
- - include_role:
+ - import_role:
name: openshift_docker_gc
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
index d5ca5185c..b09432da2 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
@@ -2,6 +2,6 @@
- name: Create Hosted Resources - openshift projects
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: create_projects.yml
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
index 2a91a827c..659c95eda 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: registry.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
new file mode 100644
index 000000000..cfc47c9b2
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
@@ -0,0 +1,13 @@
+---
+# This playbook configures persistent storage for the hosted registry.
+# It runs after the registry deployment has been created so the storage
+# tasks can attach to the existing deployment.
+- name: Configure hosted registry storage
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: registry_storage.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_router.yml b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
index bcb5a34a4..353377189 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_router.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: router.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
new file mode 100644
index 000000000..1f6868c2a
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
@@ -0,0 +1,26 @@
+---
+# This playbook waits for registry and router pods after both have been
+# created. It is intended to allow the tasks of deploying both to complete
+# before polling to save time.
+- name: Poll for hosted pod deployments
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
+
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
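
Both wait tasks are driven by inventory toggles in addition to the `openshift_hosted_manage_*` switches; the OpenStack sample inventory later in this patch sets them explicitly:

```yaml
# group_vars sketch: enable post-deploy polling for router and registry
openshift_hosted_router_wait: True
openshift_hosted_registry_wait: True
```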
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index c19147d41..0df748f47 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -115,7 +115,7 @@
- ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: main
vars:
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index bc59bd95a..d6b26647c 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -16,11 +16,12 @@
roles:
- openshift_logging
+# TODO: Remove when master config property is removed
- name: Update Master configs
hosts: oo_masters:!oo_first_master
tasks:
- block:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
diff --git a/playbooks/openshift-management/add_many_container_providers.yml b/playbooks/openshift-management/add_many_container_providers.yml
index 62fdb11c5..45231a495 100644
--- a/playbooks/openshift-management/add_many_container_providers.yml
+++ b/playbooks/openshift-management/add_many_container_providers.yml
@@ -27,7 +27,7 @@
register: results
# Include openshift_management for access to filter_plugins.
- - include_role:
+ - import_role:
name: openshift_management
tasks_from: noop
diff --git a/playbooks/openshift-management/private/add_container_provider.yml b/playbooks/openshift-management/private/add_container_provider.yml
index facb3a5b9..25d4058e5 100644
--- a/playbooks/openshift-management/private/add_container_provider.yml
+++ b/playbooks/openshift-management/private/add_container_provider.yml
@@ -3,6 +3,6 @@
hosts: oo_first_master
tasks:
- name: Run the Management Integration Tasks
- include_role:
+ import_role:
name: openshift_management
tasks_from: add_container_provider
diff --git a/playbooks/openshift-management/private/config.yml b/playbooks/openshift-management/private/config.yml
index 3f1cdf713..22f3ee8f3 100644
--- a/playbooks/openshift-management/private/config.yml
+++ b/playbooks/openshift-management/private/config.yml
@@ -21,7 +21,7 @@
tasks:
- name: Run the CFME Setup Role
- include_role:
+ import_role:
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
diff --git a/playbooks/openshift-management/private/uninstall.yml b/playbooks/openshift-management/private/uninstall.yml
index 9f35cc276..6097ea45a 100644
--- a/playbooks/openshift-management/private/uninstall.yml
+++ b/playbooks/openshift-management/private/uninstall.yml
@@ -3,6 +3,6 @@
hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
- include_role:
+ import_role:
name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 81bb8cc5c..85be0e600 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -31,7 +31,7 @@
- role: cockpit
when:
- not openshift_is_atomic | bool
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
- role: flannel_register
diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..56af18ca7 100644
--- a/playbooks/openshift-master/private/certificates-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
@@ -28,6 +28,7 @@
path: "{{ openshift.common.config_base }}/master/{{ item }}"
state: absent
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- "etcd.server.crt"
- "etcd.server.key"
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 3093444b4..153ea9993 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -47,7 +47,7 @@
state: absent
when:
- rpmgenerated_config.stat.exists == true
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
with_items:
- master
- node
@@ -185,9 +185,6 @@
- role: openshift_builddefaults
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- - role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
- role: openshift_master
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
@@ -206,13 +203,13 @@
- role: calico_master
when: openshift_use_calico | default(false) | bool
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: master
when: openshift_use_kuryr | default(false) | bool
- name: Setup the node group config maps
- include_role:
+ import_role:
name: openshift_node_group
when: openshift_master_bootstrap_enabled | default(false) | bool
run_once: True
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 9d3c12ba1..663c39868 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -125,7 +125,6 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -133,6 +132,10 @@
register: g_master_mktemp
changed_when: false
+ - name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_master_mktemp.stdout }}"
+ changed_when: false
+
- name: Retrieve OpenShift CA
hosts: oo_first_master
vars:
@@ -264,7 +267,6 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file:
diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml
index a5dbe0590..76e1ea5f3 100644
--- a/playbooks/openshift-master/private/tasks/restart_hosts.yml
+++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml
@@ -27,7 +27,6 @@
delay=10
timeout=600
port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
- become: no
# Now that ssh is back up we can wait for API on the remote system,
# avoiding some potential connection issues from local system:
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
index 4e1b3a3be..cf2c282e3 100644
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ b/playbooks/openshift-master/private/tasks/restart_services.yml
@@ -1,4 +1,4 @@
---
-- include_role:
+- import_role:
name: openshift_master
tasks_from: restart.yml
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 1077d0b9c..60b0e5bb6 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -21,7 +21,6 @@
- name: Create temp file on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- local_action: command mktemp
@@ -38,7 +37,6 @@
- name: Cleanup temp file on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index f717cd0e9..7d31340a2 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -4,7 +4,6 @@
- name: Ensure there are new_masters or new_nodes
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- fail:
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 80cd93e5f..1e237e3f0 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -16,12 +16,13 @@
roles:
- role: openshift_metrics
+# TODO: Remove when master config property is removed
- name: OpenShift Metrics
hosts: oo_masters:!oo_first_master
serial: 1
tasks:
- name: Setup the non-first masters configs
- include_role:
+ import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index b86cb3cc2..0881121c9 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -47,17 +47,23 @@
- role: nuage_node
when: openshift_use_nuage | default(false) | bool
-- name: Additional node config
- hosts: oo_nodes_use_contiv
+- name: Configure Contiv masters
+ hosts: oo_masters_to_config
+ roles:
+ - role: contiv
+ contiv_master: true
+ when: openshift_use_contiv | default(false) | bool
+
+- name: Configure rest of Contiv nodes
+ hosts: "{{ groups.oo_nodes_use_contiv | default([]) | difference(groups.oo_masters_to_config) }}"
roles:
- role: contiv
- contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
- name: Configure Kuryr node
hosts: oo_nodes_use_kuryr
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: node
when: openshift_use_kuryr | default(false) | bool
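
The rewritten Contiv plays run the masters first, then use a templated `hosts:` so machines that appear in both groups are configured exactly once. The pattern in isolation (group names illustrative):

```yaml
- name: Apply a role to group_b hosts that are not in group_a
  hosts: "{{ groups.group_b | default([]) | difference(groups.group_a | default([])) }}"
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ inventory_hostname }} is in group_b only"
```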
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 548ff7c4f..a13173e63 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -11,6 +11,7 @@
}}"
roles:
- role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
- role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index dc68d7585..644e6a69c 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -13,6 +13,7 @@
roles:
- role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: nickhammond.logrotate
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
index 6b517197d..adcbb0fdb 100644
--- a/playbooks/openshift-node/private/image_prep.yml
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -12,6 +12,13 @@
- name: run node config
import_playbook: configure_nodes.yml
+- name: node bootstrap config
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: bootstrap.yml
+
- name: Re-enable excluders
import_playbook: enable_excluders.yml
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 802dce37e..41c323f2b 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -8,7 +8,6 @@
- name: Evaluate node groups
hosts: localhost
- become: no
connection: local
tasks:
- name: Evaluate oo_containerized_master_nodes
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index bdfd3d3e6..cf13692ae 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -4,7 +4,6 @@
- name: Ensure there are new_nodes
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- fail:
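
The removal of `become: no` from localhost plays, repeated throughout this patch, is behavior-preserving as long as privilege escalation is not forced globally: `become` defaults to no. A minimal sketch under that assumption:

```yaml
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - command: whoami
      register: who
      changed_when: false
    - debug:
        var: who.stdout  # the invoking user; escalation was never implied
```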
diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-web-console/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-web-console/private/config.yml b/playbooks/openshift-web-console/private/config.yml
new file mode 100644
index 000000000..ffd702d20
--- /dev/null
+++ b/playbooks/openshift-web-console/private/config.yml
@@ -0,0 +1,31 @@
+---
+- name: Web Console Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Web Console
+ hosts: oo_first_master
+ roles:
+ - openshift_web_console
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Web Console Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
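
The start and end plays follow the installer checkpoint convention used across these playbooks: `set_stats` merges phase entries into a single report that the installer prints at the end of the run. A hypothetical view of the aggregated data after a successful install:

```yaml
installer_phase_web_console:
  status: "Complete"
  start: "20180115083000Z"
  end: "20180115083210Z"
```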
diff --git a/playbooks/openshift-web-console/private/roles b/playbooks/openshift-web-console/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-web-console/private/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 2c9b70b5f..afa56d168 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -133,7 +133,7 @@ You can also access the OpenShift cluster with a web browser by going to:
https://master-0.openshift.example.com:8443
Note that for this to work, the OpenShift nodes must be accessible
-from your computer and it's DNS configuration must use the cruster's
+from your computer and its DNS configuration must use the cluster's
DNS.
@@ -153,7 +153,7 @@ openstack stack delete --wait --yes openshift.example.com
Pay special attention to the values in the first paragraph -- these
will depend on your OpenStack environment.
-Note that the provsisioning playbooks update the original Neutron subnet
+Note that the provisioning playbooks update the original Neutron subnet
created with the Heat stack to point to the configured DNS servers.
So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows deploying OpenShift clusters
@@ -162,7 +162,7 @@ without dnsmasq proxies.
The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
will form the cluster's public DNS domain all your servers will be under. With
the default values, this will be `openshift.example.com`. For workloads, the
-default subdomain is 'apps'. That sudomain can be set as well by the
+default subdomain is 'apps'. That subdomain can be set as well by the
`openshift_openstack_app_subdomain` variable in the inventory.
If you want to use two sets of hostnames for public and private/prefixed DNS
@@ -334,7 +334,7 @@ or your trusted network. The most important is the `openshift_openstack_node_ing
that restricts public access to the deployed DNS server and cluster
nodes' ephemeral ports range.
-Note, the command ``curl https://api.ipify.org`` helps fiding an external
+Note, the command ``curl https://api.ipify.org`` helps finding an external
IP address of your box (the ansible admin node).
There is also the `manage_packages` variable (defaults to True) you
@@ -372,6 +372,112 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos
Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.
+
+## Using Cinder-backed Persistent Volumes
+
+You will need to set up OpenStack credentials. You can try putting this in your
+`inventory/group_vars/OSEv3.yml`:
+
+ openshift_cloudprovider_kind: openstack
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
+ openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+ openshift_cloudprovider_openstack_blockstorage_version: v2
+
+**NOTE**: you must specify the Block Storage version as v2, because OpenShift
+does not support the v3 API yet and the version detection is currently not
+working properly.
+
+For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
+
+[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+
+**NOTE**: the OpenStack integration currently requires DNS to be configured and
+running, and the `openshift_hostname` variable must match the Nova server name
+for each node. The cluster deployment will fail without it. If you use the
+provided OpenStack dynamic inventory and configure the
+`openshift_openstack_dns_nameservers` Ansible variable, this will be handled
+for you.
+
+After a successful deployment, the cluster is configured for Cinder persistent
+volumes.
+
+### Validation
+
+1. Log in and create a new project (with `oc login` and `oc new-project`)
+2. Create a file called `cinder-claim.yaml` with the following contents:
+
+```yaml
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+ name: "claim1"
+spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+```
+3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift
+4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound`
+5. Run `openstack volume list`
+ * A new volume called `kubernetes-dynamic-pvc-UUID` should be created
+ * Its size should be `1`
+ * It should not be attached to any server
+6. Create a file called `mysql-pod.yaml` with the following contents:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mysql
+ labels:
+ name: mysql
+spec:
+ containers:
+ - resources:
+ limits :
+ cpu: 0.5
+ image: openshift/mysql-55-centos7
+ name: mysql
+ env:
+ - name: MYSQL_ROOT_PASSWORD
+ value: yourpassword
+ - name: MYSQL_USER
+ value: wp_user
+ - name: MYSQL_PASSWORD
+ value: wp_pass
+ - name: MYSQL_DATABASE
+ value: wp_db
+ ports:
+ - containerPort: 3306
+ name: mysql
+ volumeMounts:
+ - name: mysql-persistent-storage
+ mountPath: /var/lib/mysql/data
+ volumes:
+ - name: mysql-persistent-storage
+ persistentVolumeClaim:
+ claimName: claim1
+```
+
+7. Run `oc create -f mysql-pod.yaml` to create the pod
+8. Run `oc describe pod mysql`
+ * Its events should show that the pod has successfully attached the volume above
+ * It should show no errors
+ * `openstack volume list` should show the volume attached to an OpenShift app node
+ * NOTE: this can take several seconds
+9. After a while, `oc get pod` should show the `mysql` pod as running
+10. Run `oc delete pod mysql` to remove the pod
+ * The Cinder volume should no longer be attached
+11. Run `oc delete pvc claim1` to remove the volume claim
+ * The Cinder volume should be deleted
+
+
+
## Creating and using a Cinder volume for the OpenShift registry
You can optionally have the playbooks create a Cinder volume and set
@@ -415,7 +521,7 @@ OpenStack)[openstack] for more information.
[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
-Next, we need to instruct OpenShift to use the Cinder volume for it's
+Next, we need to instruct OpenShift to use the Cinder volume for its
registry. Again in `OSEv3.yml`:
#openshift_hosted_registry_storage_kind: openstack
@@ -470,12 +576,12 @@ The **Cinder volume ID**, **filesystem** and **volume size** variables
must correspond to the values in your volume. The volume ID must be
the **UUID** of the Cinder volume, *not its name*.
-We can do formate the volume for you if you ask for it in
+The volume can also be formatted if you configure it in
`inventory/group_vars/all.yml`:
openshift_openstack_prepare_and_format_registry_volume: true
-**NOTE:** doing so **will destroy any data that's currently on the volume**!
+**NOTE:** Formatting **will destroy any data that's currently on the volume**!
You can also run the registry setup playbook directly:
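
An aside on the Cinder persistent-volume section added above: the dynamic provisioning exercised in the validation steps presupposes a Cinder-backed storage class. The playbooks configure this for you; purely for illustration, a hypothetical minimal definition would be:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
provisioner: kubernetes.io/cinder
parameters:
  fsType: ext4
```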
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 3211f619a..2ab7d14a0 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,7 @@
# some logic here?
- name: run the cluster deploy
+ import_playbook: ../../prerequisites.yml
+
+- name: run the cluster deploy
import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml
index 0356b37dd..8bb700501 100644
--- a/playbooks/openstack/openshift-cluster/prerequisites.yml
+++ b/playbooks/openstack/openshift-cluster/prerequisites.yml
@@ -2,11 +2,11 @@
- hosts: localhost
tasks:
- name: Check dependencies and OpenStack prerequisites
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: check-prerequisites.yml
- name: Check network configuration
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: net_vars_check.yaml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index fa5c91ace..a38d7bff7 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -3,7 +3,7 @@
hosts: localhost
tasks:
- name: provision cluster
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: provision.yml
@@ -36,7 +36,7 @@
hosts: localhost
tasks:
- name: Populate DNS entries
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: populate-dns.yml
when:
@@ -49,7 +49,7 @@
gather_facts: yes
tasks:
- name: Subscribe RHEL instances
- include_role:
+ import_role:
name: rhel_subscribe
when:
- ansible_distribution == "RedHat"
@@ -57,18 +57,18 @@
- rhsub_pass is defined
- name: Enable required YUM repositories
- include_role:
+ import_role:
name: openshift_repos
when:
- ansible_distribution == "RedHat"
- rh_subscribed is defined
- name: Install dependencies
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: node-packages.yml
- name: Configure Node
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: node-configuration.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 933117127..a8663f946 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -14,12 +14,13 @@ openshift_hosted_router_wait: True
openshift_hosted_registry_wait: True
## Openstack credentials
-#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_kind: openstack
#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
-#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_blockstorage_version: v2
## Use Cinder volume for Openshift registry:
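
The two corrected lines are the substance of this hunk: group_vars files are YAML, so uncommenting the old INI-style `key=value` lines would have produced invalid YAML rather than variable assignments. The corrected form:

```yaml
openshift_cloudprovider_kind: openstack
openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
openshift_cloudprovider_openstack_blockstorage_version: v2
```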
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index c7afe9a24..d63229120 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -7,6 +7,7 @@ openshift_openstack_dns_nameservers: []
# # - set custom hostnames for roles by uncommenting corresponding lines
#openshift_openstack_master_hostname: "master"
#openshift_openstack_infra_hostname: "infra-node"
+#openshift_openstack_cns_hostname: "cns"
#openshift_openstack_node_hostname: "app-node"
#openshift_openstack_lb_hostname: "lb"
#openshift_openstack_etcd_hostname: "etcd"
@@ -30,6 +31,7 @@ openshift_openstack_external_network_name: "public"
# # - note: do not remove openshift_openstack_default_image_name definition
#openshift_openstack_master_image_name: "centos7"
#openshift_openstack_infra_image_name: "centos7"
+#openshift_openstack_cns_image_name: "centos7"
#openshift_openstack_node_image_name: "centos7"
#openshift_openstack_lb_image_name: "centos7"
#openshift_openstack_etcd_image_name: "centos7"
@@ -37,6 +39,7 @@ openshift_openstack_default_image_name: "centos7"
openshift_openstack_num_masters: 1
openshift_openstack_num_infra: 1
+openshift_openstack_num_cns: 0
openshift_openstack_num_nodes: 2
# # Used Flavors
@@ -44,6 +47,7 @@ openshift_openstack_num_nodes: 2
# # - note: do note remove openshift_openstack_default_flavor definition
#openshift_openstack_master_flavor: "m1.medium"
#openshift_openstack_infra_flavor: "m1.medium"
+#openshift_openstack_cns_flavor: "m1.medium"
#openshift_openstack_node_flavor: "m1.medium"
#openshift_openstack_lb_flavor: "m1.medium"
#openshift_openstack_etcd_flavor: "m1.medium"
@@ -57,6 +61,7 @@ openshift_openstack_default_flavor: "m1.medium"
# # - note: do not remove docker_default_volume_size definition
#openshift_openstack_docker_master_volume_size: "15"
#openshift_openstack_docker_infra_volume_size: "15"
+#openshift_openstack_docker_cns_volume_size: "15"
#openshift_openstack_docker_node_volume_size: "15"
#openshift_openstack_docker_etcd_volume_size: "2"
#openshift_openstack_docker_lb_volume_size: "5"
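
Together these knobs introduce a dedicated container-native storage (CNS) host class. A sketch of enabling it (counts and sizes illustrative):

```yaml
openshift_openstack_num_cns: 3
openshift_openstack_cns_hostname: "cns"
openshift_openstack_cns_flavor: "m1.medium"
openshift_openstack_docker_cns_volume_size: "15"
```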
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index ad3fd936b..76e658eb7 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -9,6 +9,7 @@ environment.
from __future__ import print_function
+from collections import Mapping
import json
import shade
@@ -42,7 +43,10 @@ def build_inventory():
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'app']
- nodes = list(set(masters + infra_hosts + app))
+ cns = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'cns']
+
+ nodes = list(set(masters + infra_hosts + app + cns))
dns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'dns']
@@ -59,6 +63,7 @@ def build_inventory():
inventory['nodes'] = {'hosts': nodes}
inventory['infra_hosts'] = {'hosts': infra_hosts}
inventory['app'] = {'hosts': app}
+ inventory['glusterfs'] = {'hosts': cns}
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
@@ -84,16 +89,25 @@ def build_inventory():
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
+ hostvars['openshift_ip'] = server.private_v4
+
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
- hostvars['openshift_ip'] = server.private_v4
- hostvars['openshift_hostname'] = server.private_v4
+ hostvars['openshift_hostname'] = server.metadata.get(
+ 'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
+ if server.metadata['host-type'] == 'cns':
+ hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
+
node_labels = server.metadata.get('node_labels')
+    # NOTE(shadower): node_labels must be a dict, not a string (or None)
+    if node_labels and not isinstance(node_labels, Mapping):
+        node_labels = json.loads(node_labels)
+
if node_labels:
hostvars['openshift_node_labels'] = node_labels
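
With the changes above, CNS servers land in a new `glusterfs` inventory group and carry a default device list. A hypothetical excerpt of the emitted inventory, assuming the conventional dynamic-inventory `_meta` layout:

```yaml
glusterfs:
  hosts: ["cns-0.openshift.example.com"]
_meta:
  hostvars:
    cns-0.openshift.example.com:
      glusterfs_devices: ["/dev/nvme0n1"]
      openshift_ip: "10.0.0.12"
      openshift_hostname: "10.0.0.12"
```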
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 68d7f3359..7802f83d9 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,8 +3,13 @@
vars:
skip_verison: True
+- import_playbook: init/validate_hostnames.yml
+ when: not (skip_validate_hostnames | default(False))
+
- import_playbook: init/repos.yml
+- import_playbook: init/base_packages.yml
+
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config