Diffstat (limited to 'playbooks')
-rw-r--r--playbooks/adhoc/openshift_hosted_logging_efk.yaml4
-rw-r--r--playbooks/adhoc/uninstall.yml105
-rw-r--r--playbooks/aws/README.md9
-rwxr-xr-xplaybooks/aws/openshift-cluster/accept.yml6
-rw-r--r--playbooks/aws/openshift-cluster/build_ami.yml8
l---------playbooks/aws/openshift-cluster/filter_plugins1
-rw-r--r--playbooks/aws/openshift-cluster/hosted.yml25
-rw-r--r--playbooks/aws/openshift-cluster/install.yml37
l---------playbooks/aws/openshift-cluster/lookup_plugins1
-rw-r--r--playbooks/aws/openshift-cluster/prerequisites.yml8
-rw-r--r--playbooks/aws/openshift-cluster/provision.yml2
-rw-r--r--playbooks/aws/openshift-cluster/provision_install.yml15
-rw-r--r--playbooks/aws/openshift-cluster/provision_instance.yml2
-rw-r--r--playbooks/aws/openshift-cluster/provision_nodes.yml2
-rw-r--r--playbooks/aws/openshift-cluster/provision_sec_group.yml4
-rw-r--r--playbooks/aws/openshift-cluster/provision_ssh_keypair.yml2
-rw-r--r--playbooks/aws/openshift-cluster/provision_vpc.yml2
-rw-r--r--playbooks/aws/openshift-cluster/seal_ami.yml2
-rw-r--r--playbooks/aws/provisioning-inventory.example.ini1
-rw-r--r--playbooks/aws/provisioning_vars.yml.example11
-rw-r--r--playbooks/byo/config.yml2
l---------playbooks/byo/filter_plugins1
l---------playbooks/byo/lookup_plugins1
l---------playbooks/byo/openshift-checks/certificate_expiry/roles1
-rw-r--r--playbooks/byo/openshift-checks/health.yml6
-rw-r--r--playbooks/byo/openshift-checks/pre-install.yml6
-rw-r--r--playbooks/byo/openshift-cluster/config.yml12
-rw-r--r--playbooks/byo/openshift-cluster/enable_dnsmasq.yml4
l---------playbooks/byo/openshift-cluster/filter_plugins1
-rw-r--r--playbooks/byo/openshift-cluster/initialize_groups.yml10
l---------playbooks/byo/openshift-cluster/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-cluster/openshift-hosted.yml6
-rw-r--r--playbooks/byo/openshift-cluster/openshift-logging.yml15
-rw-r--r--playbooks/byo/openshift-cluster/openshift-metrics.yml10
-rw-r--r--playbooks/byo/openshift-cluster/openshift-prometheus.yml6
-rw-r--r--playbooks/byo/openshift-cluster/openshift-provisioners.yml6
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-certificates.yml44
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml10
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml24
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-master-certificates.yml16
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-node-certificates.yml16
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml10
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml10
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-router-certificates.yml10
-rw-r--r--playbooks/byo/openshift-cluster/service-catalog.yml15
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/README.md3
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml6
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml7
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml16
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml9
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/README.md18
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml7
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml9
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml7
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml9
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_7/README.md2
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml7
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_8/README.md (renamed from playbooks/byo/openshift-cluster/upgrades/v3_3/README.md)12
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml5
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml)4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml7
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_9/README.md (renamed from playbooks/byo/openshift-cluster/upgrades/v3_5/README.md)12
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml5
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml)4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml7
-rw-r--r--playbooks/byo/openshift-etcd/certificates.yml8
-rw-r--r--playbooks/byo/openshift-etcd/config.yml6
-rw-r--r--playbooks/byo/openshift-etcd/embedded2external.yml6
l---------playbooks/byo/openshift-etcd/filter_plugins1
l---------playbooks/byo/openshift-etcd/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-etcd/migrate.yml6
-rw-r--r--playbooks/byo/openshift-etcd/restart.yml6
-rw-r--r--playbooks/byo/openshift-etcd/scaleup.yml6
-rw-r--r--playbooks/byo/openshift-glusterfs/config.yml10
l---------playbooks/byo/openshift-glusterfs/filter_plugins1
l---------playbooks/byo/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-glusterfs/registry.yml10
-rw-r--r--playbooks/byo/openshift-loadbalancer/config.yml6
-rw-r--r--playbooks/byo/openshift-management/config.yml6
-rw-r--r--playbooks/byo/openshift-management/uninstall.yml4
-rw-r--r--playbooks/byo/openshift-master/additional_config.yml6
-rw-r--r--playbooks/byo/openshift-master/certificates.yml6
-rw-r--r--playbooks/byo/openshift-master/config.yml6
l---------playbooks/byo/openshift-master/filter_plugins1
l---------playbooks/byo/openshift-master/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-master/restart.yml6
-rw-r--r--playbooks/byo/openshift-master/scaleup.yml20
-rw-r--r--playbooks/byo/openshift-nfs/config.yml6
-rw-r--r--playbooks/byo/openshift-node/certificates.yml6
-rw-r--r--playbooks/byo/openshift-node/config.yml6
l---------playbooks/byo/openshift-node/filter_plugins1
l---------playbooks/byo/openshift-node/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-node/network_manager.yml4
-rw-r--r--playbooks/byo/openshift-node/restart.yml6
-rw-r--r--playbooks/byo/openshift-preflight/check.yml3
-rw-r--r--playbooks/byo/openshift_facts.yml17
-rw-r--r--playbooks/byo/rhel_subscribe.yml12
l---------playbooks/certificate_expiry1
-rw-r--r--playbooks/common/openshift-cluster/cockpit-ui.yml6
-rw-r--r--playbooks/common/openshift-cluster/config.yml67
-rw-r--r--playbooks/common/openshift-cluster/create_persistent_volumes.yml18
-rw-r--r--playbooks/common/openshift-cluster/enable_dnsmasq.yml60
l---------playbooks/common/openshift-cluster/filter_plugins1
-rw-r--r--playbooks/common/openshift-cluster/initialize_facts.yml156
-rw-r--r--playbooks/common/openshift-cluster/initialize_openshift_repos.yml8
l---------playbooks/common/openshift-cluster/library1
l---------playbooks/common/openshift-cluster/lookup_plugins1
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml35
-rw-r--r--playbooks/common/openshift-cluster/openshift_prometheus.yml5
l---------playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins1
l---------playbooks/common/openshift-cluster/redeploy-certificates/library1
l---------playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins1
l---------playbooks/common/openshift-cluster/redeploy-certificates/roles1
-rw-r--r--playbooks/common/openshift-cluster/sanity_checks.yml51
-rw-r--r--playbooks/common/openshift-cluster/std_include.yml46
-rw-r--r--playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml13
-rw-r--r--playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml13
-rw-r--r--playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml15
-rw-r--r--playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml21
-rw-r--r--playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml)3
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml26
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh25
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml14
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml14
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml57
l---------playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins1
l---------playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins1
l---------playbooks/common/openshift-cluster/upgrades/etcd/roles1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml108
l---------playbooks/common/openshift-cluster/upgrades/filter_plugins1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml11
l---------playbooks/common/openshift-cluster/upgrades/lookup_plugins1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/post_control_plane.yml17
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/config.yml77
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml6
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml22
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml94
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml24
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml22
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml37
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml20
-rw-r--r--playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml38
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml183
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml48
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml66
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml173
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml66
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml20
l---------playbooks/common/openshift-cluster/upgrades/v3_3/roles1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml118
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml119
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml113
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml10
l---------playbooks/common/openshift-cluster/upgrades/v3_4/roles1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml116
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml119
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml111
l---------playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml10
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml118
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml123
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml111
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml67
l---------playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml117
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml111
l---------playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml133
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml131
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml109
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml10
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml)0
l---------playbooks/common/openshift-cluster/upgrades/v3_8/roles (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml59
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml64
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml38
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml7
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml)5
l---------playbooks/common/openshift-cluster/upgrades/v3_9/roles1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml55
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml65
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml34
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml7
-rw-r--r--playbooks/common/openshift-cluster/validate_hostnames.yml23
-rw-r--r--playbooks/common/openshift-etcd/certificates.yml4
l---------playbooks/common/openshift-etcd/filter_plugins1
l---------playbooks/common/openshift-etcd/lookup_plugins1
-rw-r--r--playbooks/common/openshift-etcd/restart.yml27
l---------playbooks/common/openshift-glusterfs/filter_plugins1
l---------playbooks/common/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/common/openshift-glusterfs/registry.yml49
l---------playbooks/common/openshift-loadbalancer/filter_plugins1
l---------playbooks/common/openshift-loadbalancer/lookup_plugins1
l---------playbooks/common/openshift-management/filter_plugins1
l---------playbooks/common/openshift-management/library1
-rw-r--r--playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js2
l---------playbooks/common/openshift-master/filter_plugins1
l---------playbooks/common/openshift-master/library1
l---------playbooks/common/openshift-master/lookup_plugins1
-rw-r--r--playbooks/common/openshift-master/restart_services.yml22
l---------playbooks/common/openshift-nfs/filter_plugins1
l---------playbooks/common/openshift-nfs/lookup_plugins1
-rw-r--r--playbooks/common/openshift-node/config.yml34
l---------playbooks/common/openshift-node/filter_plugins1
-rw-r--r--playbooks/common/openshift-node/image_prep.yml21
l---------playbooks/common/openshift-node/lookup_plugins1
-rw-r--r--playbooks/container-runtime/config.yml6
-rw-r--r--playbooks/container-runtime/private/build_container_groups.yml6
-rw-r--r--playbooks/container-runtime/private/config.yml25
l---------playbooks/container-runtime/private/roles1
-rw-r--r--playbooks/container-runtime/private/setup_storage.yml18
-rw-r--r--playbooks/container-runtime/setup_storage.yml6
-rw-r--r--playbooks/deploy_cluster.yml49
-rw-r--r--playbooks/gcp/openshift-cluster/provision.yml19
-rw-r--r--playbooks/gcp/provision.yml13
-rw-r--r--playbooks/init/base_packages.yml37
-rw-r--r--playbooks/init/evaluate_groups.yml (renamed from playbooks/common/openshift-cluster/evaluate_groups.yml)10
-rw-r--r--playbooks/init/facts.yml104
-rw-r--r--playbooks/init/main.yml36
-rw-r--r--playbooks/init/repos.yml16
l---------playbooks/init/roles1
-rw-r--r--playbooks/init/sanity_checks.yml15
-rw-r--r--playbooks/init/validate_hostnames.yml43
-rw-r--r--playbooks/init/vars/cluster_hosts.yml (renamed from playbooks/byo/openshift-cluster/cluster_hosts.yml)0
-rw-r--r--playbooks/init/version.yml (renamed from playbooks/common/openshift-cluster/initialize_openshift_version.yml)11
-rw-r--r--playbooks/openshift-checks/README.md (renamed from playbooks/byo/openshift-checks/README.md)14
-rw-r--r--playbooks/openshift-checks/adhoc.yml (renamed from playbooks/byo/openshift-checks/adhoc.yml)8
-rw-r--r--playbooks/openshift-checks/certificate_expiry/default.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/default.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/easy-mode.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)0
-rw-r--r--playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)0
l---------playbooks/openshift-checks/certificate_expiry/roles (renamed from playbooks/byo/openshift-checks/roles)0
-rw-r--r--playbooks/openshift-checks/health.yml4
-rw-r--r--playbooks/openshift-checks/pre-install.yml4
-rw-r--r--playbooks/openshift-checks/private/adhoc.yml (renamed from playbooks/common/openshift-checks/adhoc.yml)5
-rw-r--r--playbooks/openshift-checks/private/health.yml (renamed from playbooks/common/openshift-checks/health.yml)6
-rw-r--r--playbooks/openshift-checks/private/install.yml51
-rw-r--r--playbooks/openshift-checks/private/pre-install.yml (renamed from playbooks/common/openshift-checks/pre-install.yml)6
l---------playbooks/openshift-checks/private/roles (renamed from playbooks/byo/openshift-etcd/roles)0
l---------playbooks/openshift-checks/roles1
-rw-r--r--playbooks/openshift-etcd/certificates.yml6
-rw-r--r--playbooks/openshift-etcd/config.yml4
-rw-r--r--playbooks/openshift-etcd/embedded2external.yml4
-rw-r--r--playbooks/openshift-etcd/migrate.yml4
-rw-r--r--playbooks/openshift-etcd/private/ca.yml (renamed from playbooks/common/openshift-etcd/ca.yml)5
-rw-r--r--playbooks/openshift-etcd/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml)12
-rw-r--r--playbooks/openshift-etcd/private/certificates.yml4
-rw-r--r--playbooks/openshift-etcd/private/config.yml (renamed from playbooks/common/openshift-etcd/config.yml)23
-rw-r--r--playbooks/openshift-etcd/private/embedded2external.yml (renamed from playbooks/common/openshift-etcd/embedded2external.yml)68
-rw-r--r--playbooks/openshift-etcd/private/master_etcd_certificates.yml (renamed from playbooks/common/openshift-etcd/master_etcd_certificates.yml)1
-rw-r--r--playbooks/openshift-etcd/private/migrate.yml (renamed from playbooks/common/openshift-etcd/migrate.yml)49
-rw-r--r--playbooks/openshift-etcd/private/redeploy-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml)48
-rw-r--r--playbooks/openshift-etcd/private/redeploy-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml)8
-rw-r--r--playbooks/openshift-etcd/private/restart.yml19
l---------playbooks/openshift-etcd/private/roles (renamed from playbooks/byo/openshift-glusterfs/roles)0
-rw-r--r--playbooks/openshift-etcd/private/scaleup.yml (renamed from playbooks/common/openshift-etcd/scaleup.yml)14
-rw-r--r--playbooks/openshift-etcd/private/server_certificates.yml (renamed from playbooks/common/openshift-etcd/server_certificates.yml)5
-rw-r--r--playbooks/openshift-etcd/private/upgrade_backup.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/backup.yml)10
-rw-r--r--playbooks/openshift-etcd/private/upgrade_image_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml)11
-rw-r--r--playbooks/openshift-etcd/private/upgrade_main.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/main.yml)10
-rw-r--r--playbooks/openshift-etcd/private/upgrade_rpm_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml)11
-rw-r--r--playbooks/openshift-etcd/private/upgrade_step.yml64
-rw-r--r--playbooks/openshift-etcd/redeploy-ca.yml4
-rw-r--r--playbooks/openshift-etcd/redeploy-certificates.yml10
-rw-r--r--playbooks/openshift-etcd/restart.yml4
-rw-r--r--playbooks/openshift-etcd/scaleup.yml4
-rw-r--r--playbooks/openshift-etcd/upgrade.yml4
-rw-r--r--playbooks/openshift-glusterfs/README.md (renamed from playbooks/byo/openshift-glusterfs/README.md)5
-rw-r--r--playbooks/openshift-glusterfs/config.yml4
-rw-r--r--playbooks/openshift-glusterfs/private/config.yml (renamed from playbooks/common/openshift-glusterfs/config.yml)32
-rw-r--r--playbooks/openshift-glusterfs/private/registry.yml20
l---------playbooks/openshift-glusterfs/private/roles (renamed from playbooks/common/openshift-etcd/roles)0
-rw-r--r--playbooks/openshift-glusterfs/registry.yml4
-rw-r--r--playbooks/openshift-hosted/config.yml4
-rw-r--r--playbooks/openshift-hosted/private/cockpit-ui.yml8
-rw-r--r--playbooks/openshift-hosted/private/config.yml45
-rw-r--r--playbooks/openshift-hosted/private/create_persistent_volumes.yml5
-rw-r--r--playbooks/openshift-hosted/private/install_docker_gc.yml7
-rw-r--r--playbooks/openshift-hosted/private/openshift_default_storage_class.yml (renamed from playbooks/common/openshift-cluster/openshift_default_storage_class.yml)2
-rw-r--r--playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml (renamed from playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml)2
-rw-r--r--playbooks/openshift-hosted/private/openshift_hosted_registry.yml (renamed from playbooks/common/openshift-cluster/openshift_hosted_registry.yml)2
-rw-r--r--playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml13
-rw-r--r--playbooks/openshift-hosted/private/openshift_hosted_router.yml (renamed from playbooks/common/openshift-cluster/openshift_hosted_router.yml)2
-rw-r--r--playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml26
-rw-r--r--playbooks/openshift-hosted/private/redeploy-registry-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/registry.yml)16
-rw-r--r--playbooks/openshift-hosted/private/redeploy-router-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/router.yml)20
l---------playbooks/openshift-hosted/private/roles (renamed from playbooks/byo/openshift-master/roles)0
-rw-r--r--playbooks/openshift-hosted/redeploy-registry-certificates.yml4
-rw-r--r--playbooks/openshift-hosted/redeploy-router-certificates.yml4
-rw-r--r--playbooks/openshift-loadbalancer/config.yml4
-rw-r--r--playbooks/openshift-loadbalancer/private/config.yml (renamed from playbooks/common/openshift-loadbalancer/config.yml)29
l---------playbooks/openshift-loadbalancer/private/roles (renamed from playbooks/byo/openshift-node/roles)0
-rw-r--r--playbooks/openshift-logging/config.yml9
-rw-r--r--playbooks/openshift-logging/private/config.yml (renamed from playbooks/common/openshift-cluster/openshift_logging.yml)19
l---------playbooks/openshift-logging/private/roles (renamed from playbooks/common/openshift-checks/roles)0
-rw-r--r--playbooks/openshift-management/add_container_provider.yml4
-rw-r--r--playbooks/openshift-management/add_many_container_providers.yml36
-rw-r--r--playbooks/openshift-management/config.yml4
-rw-r--r--playbooks/openshift-management/private/add_container_provider.yml8
-rw-r--r--playbooks/openshift-management/private/config.yml (renamed from playbooks/common/openshift-management/config.yml)18
l---------playbooks/openshift-management/private/roles (renamed from playbooks/common/openshift-management/roles)0
-rw-r--r--playbooks/openshift-management/private/uninstall.yml (renamed from playbooks/common/openshift-management/uninstall.yml)4
l---------playbooks/openshift-management/roles1
-rw-r--r--playbooks/openshift-management/uninstall.yml2
-rw-r--r--playbooks/openshift-master/additional_config.yml4
-rw-r--r--playbooks/openshift-master/certificates.yml4
-rw-r--r--playbooks/openshift-master/config.yml4
-rw-r--r--playbooks/openshift-master/private/additional_config.yml (renamed from playbooks/common/openshift-master/additional_config.yml)26
-rw-r--r--playbooks/openshift-master/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml)0
-rw-r--r--playbooks/openshift-master/private/certificates.yml (renamed from playbooks/common/openshift-master/certificates.yml)4
-rw-r--r--playbooks/openshift-master/private/config.yml (renamed from playbooks/common/openshift-master/config.yml)55
-rw-r--r--playbooks/openshift-master/private/redeploy-certificates.yml6
-rw-r--r--playbooks/openshift-master/private/redeploy-openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml)73
-rw-r--r--playbooks/openshift-master/private/restart.yml (renamed from playbooks/common/openshift-master/restart.yml)9
-rw-r--r--playbooks/openshift-master/private/revert-client-ca.yml17
l---------playbooks/openshift-master/private/roles (renamed from playbooks/common/openshift-glusterfs/roles)0
-rw-r--r--playbooks/openshift-master/private/scaleup.yml (renamed from playbooks/common/openshift-master/scaleup.yml)25
-rw-r--r--playbooks/openshift-master/private/set_network_facts.yml (renamed from playbooks/common/openshift-master/set_network_facts.yml)0
-rw-r--r--playbooks/openshift-master/private/tasks/restart_hosts.yml (renamed from playbooks/common/openshift-master/restart_hosts.yml)0
-rw-r--r--playbooks/openshift-master/private/tasks/restart_services.yml4
-rw-r--r--playbooks/openshift-master/private/tasks/wire_aggregator.yml (renamed from playbooks/common/openshift-master/tasks/wire_aggregator.yml)37
-rw-r--r--playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js1
-rw-r--r--playbooks/openshift-master/private/validate_restart.yml (renamed from playbooks/common/openshift-master/validate_restart.yml)3
-rw-r--r--playbooks/openshift-master/redeploy-certificates.yml6
-rw-r--r--playbooks/openshift-master/redeploy-openshift-ca.yml4
-rw-r--r--playbooks/openshift-master/restart.yml4
-rw-r--r--playbooks/openshift-master/scaleup.yml23
-rw-r--r--playbooks/openshift-metrics/config.yml4
-rw-r--r--playbooks/openshift-metrics/private/config.yml (renamed from playbooks/common/openshift-cluster/openshift_metrics.yml)19
l---------playbooks/openshift-metrics/private/roles1
-rw-r--r--playbooks/openshift-nfs/config.yml4
-rw-r--r--playbooks/openshift-nfs/private/config.yml (renamed from playbooks/common/openshift-nfs/config.yml)17
l---------playbooks/openshift-nfs/private/roles (renamed from playbooks/common/openshift-loadbalancer/roles)0
-rw-r--r--playbooks/openshift-node/certificates.yml4
-rw-r--r--playbooks/openshift-node/config.yml4
-rw-r--r--playbooks/openshift-node/network_manager.yml4
-rw-r--r--playbooks/openshift-node/private/additional_config.yml (renamed from playbooks/common/openshift-node/additional_config.yml)17
-rw-r--r--playbooks/openshift-node/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml)0
-rw-r--r--playbooks/openshift-node/private/certificates.yml (renamed from playbooks/common/openshift-node/certificates.yml)0
-rw-r--r--playbooks/openshift-node/private/clean_image.yml10
-rw-r--r--playbooks/openshift-node/private/config.yml38
-rw-r--r--playbooks/openshift-node/private/configure_nodes.yml (renamed from playbooks/common/openshift-node/configure_nodes.yml)8
-rw-r--r--playbooks/openshift-node/private/containerized_nodes.yml (renamed from playbooks/common/openshift-node/containerized_nodes.yml)8
-rw-r--r--playbooks/openshift-node/private/enable_excluders.yml (renamed from playbooks/common/openshift-node/enable_excluders.yml)1
-rw-r--r--playbooks/openshift-node/private/etcd_client_config.yml (renamed from playbooks/common/openshift-node/etcd_client_config.yml)0
-rw-r--r--playbooks/openshift-node/private/image_prep.yml26
-rw-r--r--playbooks/openshift-node/private/manage_node.yml (renamed from playbooks/common/openshift-node/manage_node.yml)1
-rw-r--r--playbooks/openshift-node/private/network_manager.yml (renamed from playbooks/common/openshift-node/network_manager.yml)4
-rw-r--r--playbooks/openshift-node/private/redeploy-certificates.yml6
-rw-r--r--playbooks/openshift-node/private/restart.yml (renamed from playbooks/common/openshift-node/restart.yml)17
l---------playbooks/openshift-node/private/roles (renamed from playbooks/common/openshift-master/roles)0
-rw-r--r--playbooks/openshift-node/private/setup.yml (renamed from playbooks/common/openshift-node/setup.yml)3
-rw-r--r--playbooks/openshift-node/redeploy-certificates.yml6
-rw-r--r--playbooks/openshift-node/restart.yml4
-rw-r--r--playbooks/openshift-node/scaleup.yml (renamed from playbooks/byo/openshift-node/scaleup.yml)8
-rw-r--r--playbooks/openshift-prometheus/config.yml4
-rw-r--r--playbooks/openshift-prometheus/private/config.yml29
l---------playbooks/openshift-prometheus/private/roles1
-rw-r--r--playbooks/openshift-provisioners/config.yml4
-rw-r--r--playbooks/openshift-provisioners/private/config.yml (renamed from playbooks/common/openshift-cluster/openshift_provisioners.yml)0
l---------playbooks/openshift-provisioners/private/roles1
-rw-r--r--playbooks/openshift-service-catalog/config.yml4
-rw-r--r--playbooks/openshift-service-catalog/private/config.yml (renamed from playbooks/common/openshift-cluster/service_catalog.yml)16
l---------playbooks/openshift-service-catalog/private/roles1
-rw-r--r--playbooks/openshift-web-console/config.yml4
-rw-r--r--playbooks/openshift-web-console/private/config.yml31
l---------playbooks/openshift-web-console/private/roles (renamed from playbooks/common/openshift-nfs/roles)0
-rw-r--r--playbooks/openstack/README.md235
-rw-r--r--playbooks/openstack/advanced-configuration.md641
-rw-r--r--playbooks/openstack/openshift-cluster/install.yml15
-rw-r--r--playbooks/openstack/openshift-cluster/prerequisites.yml12
-rw-r--r--playbooks/openstack/openshift-cluster/provision.yml74
-rw-r--r--playbooks/openstack/openshift-cluster/provision_install.yml9
l---------playbooks/openstack/openshift-cluster/roles (renamed from playbooks/common/openshift-node/roles)0
-rw-r--r--playbooks/openstack/sample-inventory/group_vars/OSEv3.yml59
-rw-r--r--playbooks/openstack/sample-inventory/group_vars/all.yml126
-rwxr-xr-xplaybooks/openstack/sample-inventory/inventory.py117
-rw-r--r--playbooks/prerequisites.yml21
-rw-r--r--playbooks/redeploy-certificates.yml26
l---------playbooks/roles1
394 files changed, 3894 insertions, 4332 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 44a2ef534..faeb332ad 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -8,9 +8,9 @@
hosts: masters:!masters[0]
pre_tasks:
- set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
tasks:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 07f10d48c..0e0e2b425 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -18,9 +18,8 @@
# Since we're not calling openshift_facts we'll do this for now
- set_fact:
- is_atomic: "{{ ostree_output.rc == 0 }}"
- - set_fact:
- is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+ openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+ openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
# Stop services on all hosts prior to removing files.
- hosts: nodes
@@ -62,7 +61,6 @@
- origin-master
- origin-master-api
- origin-master-controllers
- - pcsd
failed_when: false
- hosts: etcd
@@ -124,13 +122,17 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-openshift-node
+ - tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
+ register: result
+ until: result is succeeded
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
- when: not is_atomic | bool
+ register: result
+ until: result is succeeded
+ when: not openshift_is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -151,6 +153,14 @@
- lbr0
- vlinuxbr
- vovsbr
+
+ - name: Remove virtual devices
+ command: nmcli device delete "{{ item }}"
+ failed_when: False
+ with_items:
+ - tun0
+ - docker0
+
when: openshift_remove_all | default(true) | bool
- shell: atomic uninstall "{{ item }}"-master-api
@@ -264,12 +274,30 @@
- "{{ directories.results | default([]) }}"
- files
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: restart container-engine
+ service: name=container-engine state=stopped enabled=no
+ failed_when: false
+ register: container_engine
+
+ - name: restart docker
+ service: name=docker state=stopped enabled=no
+ failed_when: false
+ when: not (container_engine is changed)
+ register: l_docker_restart_docker_in_pb_result
+ until: not (l_docker_restart_docker_in_pb_result is failed)
+ retries: 3
+ delay: 30
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- /etc/openshift
- /etc/openshift-sdn
+ - /etc/pki/ca-trust/source/anchors/openshift-ca.crt
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- /etc/sysconfig/openshift-node-dep
@@ -284,23 +312,38 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
+ - /var/lib/docker
+
+ - name: Rebuild ca-trust
+ command: update-ca-trust
+
+ - name: Reset Docker proxy configuration
+ lineinfile:
+ state=absent
+ dest=/etc/sysconfig/docker
+ regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*'
+
+ - name: Reset Docker registry configuration
+ lineinfile:
+ state=absent
+ dest=/etc/sysconfig/docker
+ regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*'
+
+ - name: Detect Docker storage configuration
+ shell: vgs -o name | grep docker
+ register: docker_vg_name
+ failed_when: false
+ changed_when: false
- - shell: systemctl daemon-reload
- changed_when: False
+ - name: Wipe out Docker storage contents
+ command: vgremove -f {{ item }}
+ with_items: "{{ docker_vg_name.stdout_lines }}"
+ when: docker_vg_name.rc == 0
- - name: restart container-engine
- service: name=container-engine state=restarted
- failed_when: false
- register: container_engine
+ - name: Wipe out Docker storage configuration
+ file: path=/etc/sysconfig/docker-storage state=absent
+ when: docker_vg_name.rc == 0
- - name: restart docker
- service: name=docker state=restarted
- failed_when: false
- when: not (container_engine | changed)
- register: l_docker_restart_docker_in_pb_result
- until: not l_docker_restart_docker_in_pb_result | failed
- retries: 3
- delay: 30
- hosts: masters
become: yes
@@ -319,7 +362,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- atomic-openshift
- atomic-openshift-clients
@@ -339,8 +382,8 @@
- origin-excluder
- origin-docker-excluder
- origin-master
- - pacemaker
- - pcs
+ register: result
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
@@ -409,8 +452,6 @@
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /usr/share/openshift/examples
- - /var/lib/pacemaker
- - /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
- /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- /usr/lib/systemd/system/origin-master-api.service
@@ -445,17 +486,19 @@
- name: Stop additional atomic services
service: name={{ item }} state=stopped
- when: is_containerized | bool
+ when: openshift_is_containerized | bool
with_items:
- etcd_container
failed_when: false
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- etcd
- etcd3
+ register: result
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
@@ -510,9 +553,11 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
+ register: result
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
@@ -525,3 +570,7 @@
with_items:
- /etc/ansible/facts.d/openshift.fact
- /var/lib/haproxy/stats
+ # Here we remove only limits.conf rather than the whole directory, as users may have placed their own files there.
+ # - /etc/systemd/system/haproxy.service.d
+ - /etc/systemd/system/haproxy.service.d/limits.conf
+ - /etc/systemd/system/haproxy.service
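Throughout the uninstall changes above, the package-removal tasks gain `register: result` together with `until: result is succeeded`, so a transient package-manager failure is retried rather than aborting the whole uninstall. A minimal sketch of that pattern, assuming Ansible's default retry behavior (the explicit `retries`/`delay` values below are illustrative only):

```
- name: Remove packages
  package:
    name: "{{ item }}"
    state: absent
  with_items:
  - haproxy
  when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
  register: result
  until: result is succeeded   # retry while the package transaction still fails
  retries: 3                   # illustrative; also Ansible's default when 'until' is set
  delay: 5                     # illustrative pause in seconds between attempts
```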
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index fbab61189..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -65,8 +65,9 @@ openshift_release: # example: v3.7
openshift_pkg_version: # example: -3.7.0
openshift_aws_ssh_key_name: # example: myuser_key
openshift_aws_base_ami: # example: ami-12345678
+# These are required when doing SSL on the ELBs
openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
@@ -74,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
#### Step 0 (optional)
@@ -133,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook:
```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```
This playbook accomplishes the following:
1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml) playbook.
Once this playbook completes, the cluster masters should be installed and configured.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index c2c8bea50..e7bed4f6e 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -14,11 +14,11 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: bring lib_openshift into scope
- include_role:
+ import_role:
name: lib_openshift
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
@@ -30,7 +30,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5b4a6a1e8..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -28,11 +28,13 @@
set_fact:
ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
openshift_node_bootstrap: True
+ openshift_node_image_prep_packages:
+ - cloud-utils-growpart
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../common/openshift-node/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
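The hunks above show the pattern applied across these playbooks: dynamic `include_role` and `include:` statements become static `import_role` and `import_playbook:` statements, which Ansible resolves at parse time rather than at run time, and the removed `ec2_remote_facts` module is replaced by its renamed successor `ec2_instance_facts`. A minimal sketch of the task-level change (the role name is taken from the accept.yml hunk above); the play-level `include:` → `import_playbook:` change follows the same idea:

```
# Before: dynamic include, evaluated only when the task runs
# - include_role:
#     name: lib_openshift

# After: static import, resolved while the playbook is parsed
- import_role:
    name: lib_openshift
```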
diff --git a/playbooks/aws/openshift-cluster/filter_plugins b/playbooks/aws/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/aws/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
new file mode 100644
index 000000000..9d9ed29de
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -0,0 +1,25 @@
+---
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(false) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
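The new hosted.yml above gates each optional hosted component behind an install flag checked in its `when:` clause. A minimal sketch of inventory variables that would drive it, with the flag names taken from the playbook above and the values purely illustrative:

```
# group_vars / provisioning_vars.yml (illustrative values)
openshift_metrics_install_metrics: true
openshift_logging_install_logging: false
openshift_hosted_prometheus_deploy: true
openshift_enable_service_catalog: false
openshift_management_install_management: false
```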
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 4d0bf9531..a3fc82f9a 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -2,7 +2,7 @@
- name: Setup the master node group
hosts: localhost
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: setup_master_group.yml
@@ -11,15 +11,36 @@
gather_facts: no
remote_user: root
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: master_facts.yml
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
+- name: run the init
+ import_playbook: ../../init/main.yml
-- name: run the std_include
- include: ../../common/openshift-cluster/std_include.yml
+- name: perform the installer openshift-checks
+ import_playbook: ../../openshift-checks/private/install.yml
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: etcd install
+ import_playbook: ../../openshift-etcd/private/config.yml
+
+- name: include nfs
+ import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- name: include loadbalancer
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- name: include openshift-master config
+ import_playbook: ../../openshift-master/private/config.yml
+
+- name: include master additional config
+ import_playbook: ../../openshift-master/private/additional_config.yml
+
+- name: include openshift-node config
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: include openshift-glusterfs
+ import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/lookup_plugins b/playbooks/aws/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/aws/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index df77fe3bc..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,8 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
- vars:
- openshift_aws_node_group_type: compute
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 4b5bd22ea..7dde60b7d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -12,6 +12,6 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: provision cluster
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index e787deced..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,13 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
-- name: Include the install.yml playbook to install cluster
- include: install.yml
+- name: Include the install.yml playbook to install cluster on masters
+ import_playbook: install.yml
-- name: Include the install.yml playbook to install cluster
- include: provision_nodes.yml
+- name: Include the provision_nodes.yml playbook to provision infra/compute node resources
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
+
+- name: Include the hosted.yml playbook to finish the hosted configuration
+ import_playbook: hosted.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
index 6e843453c..6c7c1f069 100644
--- a/playbooks/aws/openshift-cluster/provision_instance.yml
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -7,6 +7,6 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 44c686e08..82f147865 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -13,6 +13,6 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: create the node groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
index 039357adb..a0d4ec728 100644
--- a/playbooks/aws/openshift-cluster/provision_sec_group.yml
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -6,8 +6,8 @@
connection: local
gather_facts: no
tasks:
- - name: create an instance and prepare for ami
- include_role:
+ - name: create security groups
+ import_role:
name: openshift_aws
tasks_from: security_group.yml
when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
index 3ec683958..d86ff9f9b 100644
--- a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: ssh_keys.yml
vars:
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
index 0a23a6d32..cf72f6c87 100644
--- a/playbooks/aws/openshift-cluster/provision_vpc.yml
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create a vpc
- include_role:
+ import_role:
name: openshift_aws
tasks_from: vpc.yml
when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
index 8239a64fb..f315db604 100644
--- a/playbooks/aws/openshift-cluster/seal_ami.yml
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -7,6 +7,6 @@
become: no
tasks:
- name: seal the ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
index 238a7eb2f..cf76c9d10 100644
--- a/playbooks/aws/provisioning-inventory.example.ini
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -11,6 +11,7 @@ etcd
openshift_deployment_type=origin
openshift_master_bootstrap_enabled=True
+openshift_master_api_port=443
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index aa91363ae..f6b1a6b5d 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -46,7 +46,7 @@ openshift_pkg_version: # -3.7.0
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
# vpc + subnet.
-#openshift_aws_subnet_name:
+#openshift_aws_subnet_az:
# -------------- #
# Security Group #
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
# --------- #
# Variables in this section apply to building a node AMI for use in your
# openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified
+# The current storage setup will require a drive if using a separate storage device
+# for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
# must specify a base_ami when building an AMI
openshift_aws_base_ami: # ami-12345678
@@ -116,5 +121,5 @@ openshift_aws_base_ami: # ami-12345678
# custom certificates are required for the ELB
openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt'
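Because the example file now uses `openshift_aws_iam_cert_key_path` (renamed from `openshift_aws_iam_key_path`) and lists the certificate chain path alongside it, existing provisioning_vars files carrying the old key name need updating. A minimal sketch of the resulting ELB certificate section, reusing the placeholder paths from the example above:

```
# Required only when terminating SSL on the ELBs (placeholder paths)
openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
openshift_aws_iam_cert_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
```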
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 7d03914a2..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: openshift-cluster/config.yml
diff --git a/playbooks/byo/filter_plugins b/playbooks/byo/filter_plugins
deleted file mode 120000
index a4f518f07..000000000
--- a/playbooks/byo/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/lookup_plugins b/playbooks/byo/lookup_plugins
deleted file mode 120000
index c528bcd1d..000000000
--- a/playbooks/byo/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/byo/openshift-checks/certificate_expiry/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
deleted file mode 100644
index 96a71e4dc..000000000
--- a/playbooks/byo/openshift-checks/health.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
deleted file mode 100644
index dd93df0bb..000000000
--- a/playbooks/byo/openshift-checks/pre-install.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
deleted file mode 100644
index 60fa44c5b..000000000
--- a/playbooks/byo/openshift-cluster/config.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
deleted file mode 100644
index 9ce8f0d3c..000000000
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: initialize_groups.yml
-
-- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/filter_plugins b/playbooks/byo/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml
deleted file mode 100644
index 2a725510a..000000000
--- a/playbooks/byo/openshift-cluster/initialize_groups.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/openshift-hosted.yml b/playbooks/byo/openshift-cluster/openshift-hosted.yml
deleted file mode 100644
index edd4c8d7b..000000000
--- a/playbooks/byo/openshift-cluster/openshift-hosted.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-cluster/openshift_hosted.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
deleted file mode 100644
index a523bb47f..000000000
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-#
-# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
-# currently supported method.
-#
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/openshift_logging.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml
deleted file mode 100644
index 1135c8c11..000000000
--- a/playbooks/byo/openshift-cluster/openshift-metrics.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/openshift_metrics.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
deleted file mode 100644
index 4d3f7f42c..000000000
--- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
deleted file mode 100644
index 8e80f158b..000000000
--- a/playbooks/byo/openshift-cluster/openshift-provisioners.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-cluster/openshift_provisioners.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
deleted file mode 100644
index 255b0dbf7..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
- vars:
- g_check_expiry_hosts: 'oo_etcd_to_config'
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
-
-- include: ../../common/openshift-etcd/certificates.yml
- vars:
- etcd_certificates_redeploy: true
-
-- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
-
-- include: ../../common/openshift-master/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
-
-- include: ../../common/openshift-node/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../common/openshift-etcd/restart.yml
- vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-
-- include: ../../common/openshift-master/restart.yml
-
-- include: ../../common/openshift-node/restart.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
- when: openshift_hosted_manage_router | default(true) | bool
-
-- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
- when: openshift_hosted_manage_registry | default(true) | bool
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
deleted file mode 100644
index 29f821eda..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
deleted file mode 100644
index f4f2ce00d..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
- vars:
- g_check_expiry_hosts: 'oo_etcd_to_config'
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
-
-- include: ../../common/openshift-etcd/certificates.yml
- vars:
- etcd_certificates_redeploy: true
-
-- include: ../../common/openshift-etcd/restart.yml
- vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-
-- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
deleted file mode 100644
index 049bad8e7..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
-
-- include: ../../common/openshift-master/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
deleted file mode 100644
index 345b0c689..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
-
-- include: ../../common/openshift-node/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../common/openshift-node/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
deleted file mode 100644
index 6e11a111b..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
deleted file mode 100644
index 30feabab3..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
deleted file mode 100644
index 2630fb234..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
deleted file mode 100644
index 40a7606e7..000000000
--- a/playbooks/byo/openshift-cluster/service-catalog.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-#
-# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
-# currently supported method.
-#
-- include: initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/service_catalog.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0f64f40f3..d9b1fc2ca 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
+- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x)
- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
-- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
-- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 7f31e26e1..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../initialize_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
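The docker upgrade entry point keeps its existing path after this change; a minimal illustrative invocation, assuming a standard BYO inventory at ~/ansible-inventory (the inventory path is an example, not part of the patch):

    ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml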
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
deleted file mode 100644
index 5bd5d64ab..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../initialize_groups.yml
-
-- include: ../../../common/openshift-cluster/evaluate_groups.yml
-
-- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index 697a18c4d..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index 4d284c279..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index 180a2821f..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
deleted file mode 100644
index 85b807dc6..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.4 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index 8cce91b3f..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index d5329b858..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index f44d55ad2..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index 5b3f6ab06..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index 40120b3e8..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,6 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 408a4c631..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,6 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index b5f42b804..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,6 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
index 4bf53be81..914e0f5b2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
@@ -1,4 +1,4 @@
-# v3.6 Major and Minor Upgrade Playbook
+# v3.7 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the following steps.
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index e41c29682..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,6 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 21e0fd815..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,6 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index 0e09d996e..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,6 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
new file mode 100644
index 000000000..23a3fcbb5
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Scale Group Upgrade Playbook
+#
+# Upgrades scale group nodes only.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
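The new scale group playbook is invoked like the other v3_7 upgrade entry points; an illustrative command, with the inventory path assumed rather than taken from the patch:

    ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml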
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
index 6892f6324..815e49c28 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
@@ -1,11 +1,10 @@
-# v3.3 Major and Minor Upgrade Playbook
+# v3.8 Major and Minor Upgrade Playbook
## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.
* Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
* Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
* Updates image streams and quickstarts
## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
new file mode 100644
index 000000000..c4094aa7e
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 8e5d0f5f9..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,6 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
new file mode 100644
index 000000000..74981cc31
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
index 53eebe65e..0ab3d3a52 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
@@ -1,11 +1,10 @@
-# v3.5 Major and Minor Upgrade Playbook
+# v3.9 Major and Minor Upgrade Playbook
## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.
* Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
* Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
* Updates image streams and quickstarts
## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..a2a9d59f2
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 2377713fa..869e185af 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,6 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..a5867434b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-etcd/certificates.yml b/playbooks/byo/openshift-etcd/certificates.yml
deleted file mode 100644
index e35cf243f..000000000
--- a/playbooks/byo/openshift-etcd/certificates.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/ca.yml
-
-- include: ../../common/openshift-etcd/certificates.yml
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
deleted file mode 100644
index 1342bd60c..000000000
--- a/playbooks/byo/openshift-etcd/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/config.yml
diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml
deleted file mode 100644
index 6690a7624..000000000
--- a/playbooks/byo/openshift-etcd/embedded2external.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/embedded2external.yml
diff --git a/playbooks/byo/openshift-etcd/filter_plugins b/playbooks/byo/openshift-etcd/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-etcd/lookup_plugins b/playbooks/byo/openshift-etcd/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
deleted file mode 100644
index 2dec2bef6..000000000
--- a/playbooks/byo/openshift-etcd/migrate.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/migrate.yml
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
deleted file mode 100644
index 034bba4b4..000000000
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/restart.yml
diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml
deleted file mode 100644
index a2a5856a9..000000000
--- a/playbooks/byo/openshift-etcd/scaleup.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-etcd/scaleup.yml
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml
deleted file mode 100644
index 3f11f3991..000000000
--- a/playbooks/byo/openshift-glusterfs/config.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-glusterfs/config.yml
diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-glusterfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-glusterfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml
deleted file mode 100644
index 6ee6febdb..000000000
--- a/playbooks/byo/openshift-glusterfs/registry.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
-
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- include: ../../common/openshift-glusterfs/registry.yml
diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml
deleted file mode 100644
index 32c828f97..000000000
--- a/playbooks/byo/openshift-loadbalancer/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-loadbalancer/config.yml
diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml
deleted file mode 100644
index e8795ef85..000000000
--- a/playbooks/byo/openshift-management/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/evaluate_groups.yml
-
-- include: ../../common/openshift-management/config.yml
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
deleted file mode 100644
index a1fb1cdc4..000000000
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# - include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml
deleted file mode 100644
index b3d7b5731..000000000
--- a/playbooks/byo/openshift-master/additional_config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-master/additional_config.yml
diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml
deleted file mode 100644
index e147dcba1..000000000
--- a/playbooks/byo/openshift-master/certificates.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-master/certificates.yml
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
deleted file mode 100644
index 98be0c448..000000000
--- a/playbooks/byo/openshift-master/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-master/config.yml
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-master/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
deleted file mode 100644
index 8950efd00..000000000
--- a/playbooks/byo/openshift-master/restart.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
deleted file mode 100644
index a09edd55a..000000000
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- name: Ensure there are new_masters or new_nodes
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail:
- msg: >
- Detected no new_masters or no new_nodes in inventory. Please
- add hosts to the new_masters and new_nodes host groups to add
- masters.
- when:
- - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0)
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-master/scaleup.yml
diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml
deleted file mode 100644
index 93b24411e..000000000
--- a/playbooks/byo/openshift-nfs/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-nfs/config.yml
diff --git a/playbooks/byo/openshift-node/certificates.yml b/playbooks/byo/openshift-node/certificates.yml
deleted file mode 100644
index 3d2de74a9..000000000
--- a/playbooks/byo/openshift-node/certificates.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-node/certificates.yml
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
deleted file mode 100644
index 839dc36ff..000000000
--- a/playbooks/byo/openshift-node/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-node/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-node/lookup_plugins b/playbooks/byo/openshift-node/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-node/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
deleted file mode 100644
index b23692237..000000000
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
deleted file mode 100644
index ccf9e82da..000000000
--- a/playbooks/byo/openshift-node/restart.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: ../openshift-cluster/initialize_groups.yml
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-node/restart.yml
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
deleted file mode 100644
index 2e53452a6..000000000
--- a/playbooks/byo/openshift-preflight/check.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# location is moved; this file remains so existing instructions keep working
-- include: ../openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index a8c1c3a88..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,19 +1,12 @@
---
-- include: openshift-cluster/initialize_groups.yml
- tags:
- - always
-
-- include: ../common/openshift-cluster/std_include.yml
- tags:
- - always
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
- # Temporarily reverting to OSEv3 until group standardization is complete
- hosts: OSEv3
+ hosts: oo_all_hosts
roles:
- openshift_facts
tasks:
- - openshift_facts:
- openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
+ - openshift_facts: {}
register: result
- - debug: var=result
+ - debug:
+ var: result
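The reworked facts playbook now gathers facts for oo_all_hosts through the shared init playbook; a sketch of how it might be run against an existing inventory (the inventory path is illustrative):

    ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift_facts.yml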
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index bc3109a31..f70f05bac 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,16 +1,14 @@
---
-- include: openshift-cluster/initialize_groups.yml
- tags:
- - always
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
- # Temporarily reverting to OSEv3 until group standardization is complete
- hosts: OSEv3
+ hosts: oo_all_hosts
roles:
- role: rhel_subscribe
when:
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- ansible_distribution == "RedHat"
- - lookup('env', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false']
+ - rhsub_user is defined
+ - rhsub_pass is defined
- role: openshift_repos
- role: os_update_latest
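After this change the subscription play keys off openshift_deployment_type and only applies the rhel_subscribe role when rhsub_user and rhsub_pass are defined; a minimal inventory sketch under those assumptions (values are placeholders, not part of the patch):

    [OSEv3:vars]
    openshift_deployment_type=openshift-enterprise
    rhsub_user=example-user
    rhsub_pass=example-password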
diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry
deleted file mode 120000
index 9cf5334a1..000000000
--- a/playbooks/certificate_expiry
+++ /dev/null
@@ -1 +0,0 @@
-byo/openshift-checks/certificate_expiry/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
deleted file mode 100644
index 5ddafdb07..000000000
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create Hosted Resources - cockpit-ui
- hosts: oo_first_master
- roles:
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index 395eb51f1..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
-
- - name: Verify Requirements - EL
- when: ansible_distribution != "Fedora"
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
- - name: Verify Requirements - Fedora
- when: ansible_distribution == "Fedora"
- action: openshift_health_check
- args:
- checks:
- - docker_image_availability
-
-- include: ../openshift-etcd/config.yml
-
-- include: ../openshift-nfs/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../openshift-loadbalancer/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../openshift-master/config.yml
-
-- include: ../openshift-master/additional_config.yml
-
-- include: ../openshift-node/config.yml
-
-- include: ../openshift-glusterfs/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: openshift_hosted.yml
-
-- include: openshift_metrics.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: openshift_logging.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: service_catalog.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- include: ../openshift-management/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
deleted file mode 100644
index ec6f2c52c..000000000
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Create persistent volumes
- hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- tasks:
- - debug: var=persistent_volumes
- - debug: var=persistent_volume_claims
-
-- name: Create Hosted Resources - persistent volumes
- hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
deleted file mode 100644
index be14b06f0..000000000
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
- post_tasks:
- - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
- when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
-
-- name: Reconfigure masters to listen on our new dns_port
- hosts: oo_masters_to_config
- handlers:
- - include: ../../../roles/openshift_master/handlers/main.yml
- static: yes
- vars:
- os_firewall_allow:
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- roles:
- - os_firewall
- tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: master
- local_facts:
- dns_port: '8053'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: dnsConfig.bindAddress
- yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master api
- - meta: flush_handlers
-
-- name: Configure nodes for dnsmasq
- hosts: oo_nodes_to_config
- handlers:
- - include: ../../../roles/openshift_node/handlers/main.yml
- static: yes
- pre_tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: node
- local_facts:
- dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
- roles:
- - openshift_node_dnsmasq
- post_tasks:
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
- yaml_key: dnsIP
- yaml_value: "{{ openshift.node.dns_ip }}"
- notify: restart node
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
deleted file mode 100644
index be2f8b5f4..000000000
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-- name: Ensure that all non-node hosts are accessible
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
- any_errors_fatal: true
- tasks:
-
-- name: Initialize host facts
- hosts: oo_all_hosts
- tasks:
- - name: load openshift_facts module
- include_role:
- name: openshift_facts
-
- # TODO: Should this role be refactored into health_checks??
- - name: Run openshift_sanitize_inventory to set variables
- include_role:
- name: openshift_sanitize_inventory
-
- - name: Detecting Operating System from ostree_booted
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
- # Locally setup containerized facts for now
- - name: initialize_facts set fact l_is_atomic
- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
- - name: initialize_facts set fact for containerized and l_is_*_system_container
- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
-
- - name: initialize_facts set facts for l_any_system_container
- set_fact:
- l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
-
- - name: initialize_facts set fact for l_etcd_runtime
- set_fact:
- l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist is fedora and python is v3
- fail:
- msg: |
- openshift-ansible requires Python 3 for {{ ansible_distribution }};
- For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when:
- - ansible_distribution == 'Fedora'
- - ansible_python['version']['major'] != 3
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist not Fedora and python must be v2
- fail:
- msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when:
- - ansible_distribution != 'Fedora'
- - ansible_python['version']['major'] != 2
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- # Fail as early as possible if Atomic and old version of Docker
- - when:
- - l_is_atomic | bool
- block:
-
- # See https://access.redhat.com/articles/2317361
- # and https://github.com/ansible/ansible/issues/15892
- # NOTE: the "'s can not be removed at this level else the docker command will fail
- # NOTE: When ansible >2.2.1.x is used this can be updated per
- # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
- - name: Determine Atomic Host Docker Version
- shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
- register: l_atomic_docker_version
-
- - name: assert atomic host docker version is 1.12 or later
- assert:
- that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
-
- - when:
- - not l_is_atomic | bool
- block:
- - name: Ensure openshift-ansible installer package deps are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - yum-utils
-
- - name: Ensure various deps for running system containers are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - atomic
- - ostree
- - runc
- when:
- - l_any_system_container | bool
-
- - name: Default system_images_registry to a enterprise registry
- set_fact:
- system_images_registry: "registry.access.redhat.com"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "openshift-enterprise"
-
- - name: Default system_images_registry to community registry
- set_fact:
- system_images_registry: "docker.io"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "origin"
-
- - name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cli_image: "{{ osm_image | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- etcd_runtime: "{{ l_etcd_runtime }}"
- system_images_registry: "{{ system_images_registry }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
-
- - name: initialize_facts set_fact repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
- set_fact:
- openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
deleted file mode 100644
index a7114fc80..000000000
--- a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
- gather_facts: no
- tasks:
- - name: initialize openshift repos
- include_role:
- name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-cluster/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
deleted file mode 100644
index c1536eb36..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Hosted Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Hosted install 'In Progress'
- set_stats:
- data:
- installer_phase_hosted: "In Progress"
- aggregate: false
-
-- include: create_persistent_volumes.yml
-
-- include: openshift_default_storage_class.yml
-
-- include: openshift_hosted_create_projects.yml
-
-- include: openshift_hosted_router.yml
-
-- include: openshift_hosted_registry.yml
-
-- include: cockpit-ui.yml
-
-- include: openshift_prometheus.yml
- when: openshift_hosted_prometheus_deploy | default(False) | bool
-
-- name: Hosted Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Hosted install 'Complete'
- set_stats:
- data:
- installer_phase_hosted: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
deleted file mode 100644
index ac2d250a3..000000000
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: Create Hosted Resources - openshift_prometheus
- hosts: oo_first_master
- roles:
- - role: openshift_prometheus
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library
deleted file mode 120000
index 9a53f009d..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
deleted file mode 100644
index 26716a92d..000000000
--- a/playbooks/common/openshift-cluster/sanity_checks.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Verify Requirements
- hosts: oo_all_hosts
- tasks:
- - fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
- - fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
- - fail:
- msg: openshift_public_hostname must be 63 characters or less
- when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
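
The deleted sanity_checks.yml enforced that at most one network plugin (openshift-sdn, flannel, nuage, contiv, calico) is enabled at a time. A minimal sketch of inventory group variables that would have satisfied those checks, assuming Calico is the desired plugin; the values are illustrative only:

    # group_vars/OSEv3.yml (illustrative) - enable exactly one network plugin
    openshift_use_openshift_sdn: false
    openshift_use_calico: true
    openshift_use_flannel: false
    openshift_use_nuage: false
    openshift_use_contiv: false
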
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
deleted file mode 100644
index 45b34c8bd..000000000
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Initialization Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- roles:
- - installer_checkpoint
- tasks:
- - name: Set install initialization 'In Progress'
- set_stats:
- data:
- installer_phase_initialize: "In Progress"
- aggregate: false
-
-- include: evaluate_groups.yml
- tags:
- - always
-
-- include: initialize_facts.yml
- tags:
- - always
-
-- include: sanity_checks.yml
- tags:
- - always
-
-- include: validate_hostnames.yml
- tags:
- - node
-
-- include: initialize_openshift_repos.yml
- tags:
- - always
-
-- include: initialize_openshift_version.yml
- tags:
- - always
-
-- name: Initialization Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set install initialization 'Complete'
- set_stats:
- data:
- installer_phase_initialize: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
deleted file mode 100644
index eb118365a..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact: k8s_type="etcd"
-
-- name: Generate etcd instance name(s)

- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: etcd_names_output
- with_sequence: count={{ num_etcd }}
-
-- set_fact:
- etcd_names: "{{ etcd_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
deleted file mode 100644
index 783f70f50..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact: k8s_type="master"
-
-- name: Generate master instance name(s)
- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: master_names_output
- with_sequence: count={{ num_masters }}
-
-- set_fact:
- master_names: "{{ master_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
deleted file mode 100644
index c103e40a9..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- set_fact: k8s_type=node
-- set_fact: sub_host_type="{{ type }}"
-- set_fact: number_nodes="{{ count }}"
-
-- name: Generate node instance name(s)
- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: node_names_output
- with_sequence: count={{ number_nodes }}
-
-- set_fact:
- node_names: "{{ node_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
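
The image-cleanup logic removed above was gated on docker_upgrade_nuke_images and bulk-removed images with docker rmi. A hedged sketch of that behaviour as a standalone task, assuming docker_upgrade_nuke_images is still supplied from the inventory:

    - name: Remove unused Docker images before the upgrade (sketch of the removed behaviour)
      shell: "docker rmi $(docker images -aq)"
      # images that are still in use are expected to fail removal
      failed_when: false
      when: docker_upgrade_nuke_images | default(false) | bool
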
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..372a39e74 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,7 +22,7 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
--key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
--name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 800621857..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,11 +1,10 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
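
disable_excluders.yml is now shared between master and node upgrades and reads its host list from l_upgrade_excluder_hosts. A sketch of how a version-specific upgrade playbook might import it; the group expression is illustrative:

    - import_playbook: disable_excluders.yml
      vars:
        l_upgrade_excluder_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
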
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index a66301c0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 98953f72e..ffb11670d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,25 +1,24 @@
---
-- include: ../../evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- - include: upgrade_check.yml
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -32,6 +31,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
@@ -43,7 +43,7 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
@@ -51,15 +51,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
+ until: not (l_docker_upgrade_drain_result is failed)
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
@@ -70,5 +70,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
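
Several hunks in this file switch from the removed "result|succeeded" / "result|failed" / "result|changed" filters to the "is succeeded" / "is failed" / "is changed" test syntax required by newer Ansible. A minimal sketch of the pattern on a hypothetical registered result:

    - name: Example task whose result is retried and inspected
      command: /bin/true
      register: example_result
      retries: 3
      delay: 5
      until: example_result is succeeded

    - debug:
        msg: "the example task reported a change"
      when: example_result is changed
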
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -2,24 +2,20 @@
- name: Restart docker
service: name=docker state=restarted
register: l_docker_restart_docker_in_upgrade_result
- until: not l_docker_restart_docker_in_upgrade_result | failed
+ until: not (l_docker_restart_docker_in_upgrade_result is failed)
retries: 3
delay: 30
-- name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service: name={{ item }} state=started
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,13 +4,13 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
@@ -35,12 +35,14 @@
name: docker
state: stopped
register: l_pb_docker_upgrade_stop_result
- until: not l_pb_docker_upgrade_stop_result | failed
+ until: not (l_pb_docker_upgrade_stop_result is failed)
retries: 3
delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result is succeeded
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 52345a9ba..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines whether a Docker upgrade is required by checking the inventory
-# variables and the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
deleted file mode 100644
index d71c96cd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ /dev/null
@@ -1,108 +0,0 @@
----
-- name: Determine etcd version
- hosts: oo_etcd_hosts_to_upgrade
- tasks:
- - block:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- - debug:
- msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
- when:
- - not openshift.common.is_containerized | bool
-
- - block:
- - name: Record containerized etcd version (docker)
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version_docker
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- when:
- - not openshift.common.is_etcd_system_container | bool
-
- # Given that a registered variable is set even if the when condition
- # is false, we need to set etcd_container_version separately
- - set_fact:
- etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
- when:
- - not openshift.common.is_etcd_system_container | bool
-
- - name: Record containerized etcd version (runc)
- command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version_runc
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- when:
- - openshift.common.is_etcd_system_container | bool
-
- # Given that a registered variable is set even if the when condition
- # is false, we need to set etcd_container_version separately
- - set_fact:
- etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
- when:
- - openshift.common.is_etcd_system_container | bool
-
- - debug:
- msg: "Etcd containerized version {{ etcd_container_version }} detected"
- when:
- - openshift.common.is_containerized | bool
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.1'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.2'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.2.5'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.3'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.3.7'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.0'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.0.15'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.1'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.1.3'
-
-- name: Upgrade fedora to latest
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_image
- vars:
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - ansible_distribution == 'Fedora'
- - not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 2826951e6..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,15 +1,20 @@
---
-- include: ../evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
+ vars:
+ openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
+ - name: set currently installed version
+ set_fact:
+ openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
- name: Check if iptables is running
command: systemctl status iptables
changed_when: false
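
The upgrade init playbook now records the version found on the first master and exposes it as openshift_currently_installed_version. A compact sketch of the hostvars lookup it relies on, assuming oo_first_master has already been evaluated:

    - name: Record the version already installed on the first master
      set_fact:
        openshift_currently_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
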
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 122066955..1b57521df 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -14,8 +14,9 @@
pre_tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: lib_openshift
+
- name: Collect all routers
oc_obj:
state: list
@@ -26,8 +27,8 @@
- set_fact:
haproxy_routers: "{{ all_routers.results.results[0]['items'] |
- oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
- oo_select_keys_from_list(['metadata']) }}"
+ lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ lib_utils_oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
@@ -85,17 +86,19 @@
roles:
- openshift_manageiq
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
# Create the new templates shipped in 3.2, existing templates are left
# unmodified. This prevents the subsequent role definition for
# openshift_examples from failing when trying to replace templates that do
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- role: openshift_examples
- when: openshift_install_examples | default(true,true) | bool
+ when: openshift_install_examples | default(true) | bool
- openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
- when: openshift_install_examples | default(true,true) | bool
+ when: openshift_install_examples | default(true) | bool
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
- role: openshift_hosted_templates
@@ -111,13 +114,11 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
- when: openshift.common.version_gte_3_3_or_1_3 | bool
changed_when: false
failed_when: false
@@ -125,7 +126,7 @@
debug:
msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
when:
- - not grep_plugin_order_override | skipped
+ - not (grep_plugin_order_override is skipped)
- grep_plugin_order_override.rc == 0
- name: Warn if shared-resource-viewer could not be updated
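
The oo_* filters used here now live in the lib_utils role and carry a lib_utils_ prefix. A hedged sketch of the renamed filters applied to the master group; it is only valid where the lib_utils filter plugins are loaded, and master_hostnames is a hypothetical fact name:

    - set_fact:
        master_hostnames: "{{ hostvars
                              | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
                              | lib_utils_oo_collect('openshift.common.hostname') }}"
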
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..cfc0c8745
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
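
The new pre/config.yml expects the version-specific upgrade playbooks to define which host groups each step runs against. A sketch of such an import with illustrative group expressions for a full control-plane-plus-nodes upgrade; the exact values are assumptions, only the variable names come from the playbook above:

    - import_playbook: ../pre/config.yml
      vars:
        l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
        l_upgrade_no_proxy_hosts: "oo_all_hosts"
        l_upgrade_health_check_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
        l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
        l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
        l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
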
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 6d8503879..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..693ab2d96
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,94 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_pkg_version is defined
+ - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_image_tag is defined
+ - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_release is defined
+ - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+ # assuming the master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift_is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+ # Handle the case of a non-HA to HA upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
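
verify_cluster.yml relies on the version_compare test to reject inventories that pin a release older than the upgrade target. A minimal sketch of that check in isolation, with both variables assumed to come from the inventory or the upgrade playbook:

    - fail:
        msg: "openshift_release {{ openshift_release }} does not match upgrade target {{ openshift_upgrade_target }}"
      when:
        - openshift_release is defined
        - not (openshift_release is version_compare(openshift_upgrade_target, '='))
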
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index 45022cd61..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters have etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yaml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..4c1156f4b 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,18 +4,24 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ import_role:
+ name: container_runtime
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
@@ -37,11 +43,11 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- - deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - openshift_deployment_type == 'origin'
+ - openshift.common.version is version_compare(openshift_upgrade_min,'<')
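
The RPM availability check above now keys off the flat openshift_service_type and openshift_is_containerized variables rather than the old openshift.common facts. A hedged sketch of a comparable standalone check; the default value 'origin' is an assumption for illustration:

    - name: Query the installed OpenShift package (RPM installs only)
      command: rpm -q "{{ openshift_service_type | default('origin') }}"
      register: installed_pkg
      changed_when: false
      failed_when: false
      when: not (openshift_is_containerized | default(false) | bool)
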
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
deleted file mode 100644
index 8cc46ab68..000000000
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# When we update package "a-${version}" and a requires b >= ${version} if we
-# don't specify the version of b yum will choose the latest version of b
-# available and the whole set of dependencies end up at the latest version.
-# Since the package module, unlike the yum module, doesn't flatten a list
-# of packages into one transaction we need to do that explicitly. The ansible
-# core team tells us not to rely on yum module transaction flattening anyway.
-
-# TODO: If the sdn package isn't already installed this will install it, we
-# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
- vars:
- master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "master"
- - not openshift.common.is_atomic | bool
-
-- name: Upgrade node packages
- package: name={{ node_pkgs | join(',') }} state=present
- vars:
- node_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "node"
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c37a5f9ab..50be0dee0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,21 @@
###############################################################################
# Upgrade Masters
###############################################################################
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
+
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master_config_upgrade hook.
+- name: Determine if service signer cert must be created
+ hosts: oo_first_master
+ tasks:
+ - name: Determine if service signer certificate must be created
+ stat:
+ path: "{{ openshift.common.config_base }}/master/service-signer.crt"
+ register: service_signer_cert_stat
+ changed_when: false
+
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -10,7 +25,7 @@
tasks:
- name: Upgrade all storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -19,35 +34,6 @@
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-# If the facts cache was for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection,
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
-
-# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master_config_upgrade hook.
-- name: Determine if service signer cert must be created
- hosts: oo_first_master
- tasks:
- - name: Determine if service signer certificate must be created
- stat:
- path: "{{ openshift.common.config_base }}/master/service-signer.crt"
- register: service_signer_cert_stat
- changed_when: false
-
-- include: create_service_signer_cert.yml
-
# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
@@ -63,94 +49,49 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
- roles:
- - openshift_facts
- - lib_utils
- post_tasks:
+ tasks:
+ - import_role:
+ name: openshift_facts
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
- - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
-
- - include: upgrade_scheduler.yml
-
- - include: "{{ master_config_hook }}"
- when: master_config_hook is defined
-
- - include_vars: ../../../../roles/openshift_master/vars/main.yml
-
- - name: Remove any legacy systemd units and update systemd units
- include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-
- - name: Check for ca-bundle.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- register: ca_bundle_stat
- failed_when: false
-
- - name: Check for ca.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- register: ca_crt_stat
- failed_when: false
-
- - name: Migrate ca.crt to ca-bundle.crt
- command: mv ca.crt ca-bundle.crt
- args:
- chdir: "{{ openshift.common.config_base }}/master"
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Link ca.crt to ca-bundle.crt
- file:
- src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- state: link
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_master) }}"
- when: oreg_url is defined or oreg_url_master is defined
+ - import_role:
+ name: openshift_master
+ tasks_from: upgrade.yml
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../openshift-master/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../openshift-master/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - openshift_version is version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -171,8 +112,8 @@
tasks:
- set_fact:
master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
@@ -188,18 +129,14 @@
roles:
- { role: openshift_cli }
vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version is version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -207,14 +144,14 @@
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version is version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -223,15 +160,16 @@
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
register: reconcile_jenkins_role_binding_result
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version is version_compare('3.7','<')
- - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool)
+ - when: openshift_upgrade_target is version_compare('3.7','<')
block:
- name: Retrieve shared-resource-viewer
oc_obj:
@@ -250,7 +188,6 @@
- "'annotations' in objout['results']['results'][0]['metadata']"
- "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
- "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
-
- copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
@@ -268,10 +205,16 @@
- "/tmp/{{ __master_shared_resource_viewer_file }}"
delete_after: true
when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
@@ -280,7 +223,7 @@
- name: Migrate storage post policy reconciliation
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
run_once: true
register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -303,8 +246,8 @@
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
@@ -318,8 +261,8 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ - include_tasks: docker/tasks/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -330,7 +273,7 @@
pre_tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: lib_openshift
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
@@ -344,25 +287,23 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
- until: not l_upgrade_control_plane_drain_result | failed
+ until: not (l_upgrade_control_plane_drain_result is failed)
retries: 60
delay: 60
roles:
- - lib_openshift
- openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
post_tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -371,5 +312,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
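A note on the include/import conversions that recur throughout this diff: Ansible 2.4 split the old include keyword into static imports (import_playbook, import_role), resolved when the playbook is parsed, and dynamic includes (include_tasks, include_role), evaluated per host at run time. A hedged sketch of the two forms as they appear above (the when condition is illustrative):

    # Static: resolved at parse time
    - import_role:
        name: openshift_node
        tasks_from: upgrade.yml

    # Dynamic: evaluated while the play runs, so it can be skipped per host
    - include_tasks: docker/tasks/upgrade.yml
      when: l_docker_upgrade | default(False) | bool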
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index c93a5d89c..464af3ae6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,23 @@
---
+- name: Prepull images and rpms before doing rolling restart
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ roles:
+ - role: openshift_facts
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade_pre.yml
+
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+ roles:
+ - lib_openshift
+ - openshift_facts
pre_tasks:
- - name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -22,28 +29,21 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
+ until: not (l_upgrade_nodes_drain_result is failed)
retries: 60
delay: 60
- roles:
- - lib_openshift
- - openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
post_tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -52,5 +52,13 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
+
+- name: Re-enable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ tasks:
+ - import_role:
+ name: openshift_excluder
+ vars:
+ r_openshift_excluder_action: enable
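As the comment in the "Drain and upgrade nodes" play notes, the rolling-upgrade knobs are not per-host inventory vars and must be supplied with -e at invocation time (for example, ansible-playbook ... -e @upgrade_vars.yml). A hedged sketch of such an extra-vars file, using the variable names from this playbook with illustrative values:

    # upgrade_vars.yml -- example values only
    openshift_upgrade_nodes_serial: "20%"            # upgrade 20% of the nodes per batch
    openshift_upgrade_nodes_max_fail_percentage: 10  # stop the play if more than 10% of a batch fails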
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..6d59bfd0b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,66 @@
+---
+- name: create new scale group
+ hosts: localhost
+ tasks:
+ - name: build upgrade scale groups
+ import_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+ msg: "Ensure that new scale groups were provisioned before proceeding to update."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
+
+- name: initialize upgrade bits
+ import_playbook: init.yml
+
+- name: unschedule nodes
+ hosts: oo_sg_current_nodes
+ tasks:
+ - name: Load lib_openshift modules
+ import_role:
+ name: ../roles/lib_openshift
+
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable is succeeded
+
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not (l_upgrade_nodes_drain_result is failed)
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+
+# Alright, let's clean up!
+- name: clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: clean up scale group
+ import_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
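For orientation, the drain task in the new scale-group playbook renders to something along these lines once the templates are resolved (node name, kubeconfig path, and timeout value are illustrative; the flags come from the task itself):

    - name: Drain Node for Kubelet upgrade (rendered example)
      command: >
        oc adm drain node-1.example.com
        --config=/etc/origin/master/admin.kubeconfig
        --force --delete-local-data --ignore-daemonsets
        --timeout=600s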
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
deleted file mode 100644
index 8558bf3e9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-# Upgrade predicates
-- vars:
- prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
- default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
- # older_predicates are the set of predicates that have previously been
- # hard-coded into openshift_facts
- older_predicates:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- # older_predicates_no_region are the set of predicates that have previously
- # been hard-coded into openshift_facts, with the Region predicate removed
- older_predicates_no_region:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- block:
-
- # Handle case where openshift_master_predicates is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
- when: openshift_master_scheduler_predicates | default(none) is not none
-
- # Handle cases where openshift_master_predicates is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when:
- - openshift_master_scheduler_current_predicates != default_predicates_no_region
- - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
- when: openshift_master_scheduler_predicates | default(none) is none
-
-
-# Upgrade priorities
-- vars:
- prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
- default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
- # older_priorities are the set of priorities that have previously been
- # hard-coded into openshift_facts
- older_priorities:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- - name: Zone
- weight: 2
- argument:
- serviceAntiAffinity:
- label: zone
- # older_priorities_no_region are the set of priorities that have previously
- # been hard-coded into openshift_facts, with the Zone priority removed
- older_priorities_no_zone:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- block:
-
- # Handle case where openshift_master_priorities is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
- when: openshift_master_scheduler_priorities | default(none) is not none
-
- # Handle cases where openshift_master_priorities is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when:
- - openshift_master_scheduler_current_priorities != default_priorities_no_zone
- - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
- when: openshift_master_scheduler_priorities | default(none) is none
-
-
-# Update scheduler
-- vars:
- scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_upgrade_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates) }}"
- priorities: "{{ openshift_upgrade_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities) }}"
- block:
- - name: Update scheduler config
- copy:
- content: "{{ scheduler_config | to_nice_json }}"
- dest: "{{ openshift_master_scheduler_conf }}"
- backup: true
- when: >
- openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined
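The removed "Update scheduler" block serialized scheduler_config with to_nice_json and wrote it to openshift_master_scheduler_conf. The resulting policy file looked roughly like the sketch below (predicate and priority lists abridged to entries named in the removed vars):

    {
      "kind": "Policy",
      "apiVersion": "v1",
      "predicates": [
        { "name": "MatchNodeSelector" },
        { "name": "PodFitsResources" }
      ],
      "priorities": [
        { "name": "LeastRequestedPriority", "weight": 1 },
        { "name": "SelectorSpreadPriority", "weight": 1 }
      ]
    }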
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index 5e7a66171..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
- yaml_value: 400
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
- yaml_value: 200
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
- yaml_value: 600
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
- yaml_value: 300
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
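For context, the modify_yaml tasks in the removed v3_3 master hook above produced master-config.yaml entries roughly like the following fragment (keys and values are taken from the tasks; the surrounding structure is a sketch):

    masterClients:
      externalKubernetesClientConnectionOverrides:
        acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
        contentType: application/vnd.kubernetes.protobuf
        burst: 400
        qps: 200
      openshiftLoopbackClientConnectionOverrides:
        acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
        contentType: application/vnd.kubernetes.protobuf
        burst: 600
        qps: 300
    controllerConfig:
      serviceServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key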
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.burst'
- yaml_value: 40
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.qps'
- yaml_value: 20
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index a241ef039..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index f64f0e003..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index cee4e9087..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index ae217ba2e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index 43da5b629..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index 8531e6045..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index 30e719d8f..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
deleted file mode 100644
index e9cec9220..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index e29d0f8e6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
deleted file mode 100644
index ae63c9ca9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-###############################################################################
-# Pre upgrade checks for known data problems, if this playbook fails you should
-# contact support. If you're not supported contact users@lists.openshift.com
-#
-# oc_objectvalidator provides these two checks
-# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
-# https://github.com/openshift/origin/issues/12697
-# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
-#
-###############################################################################
-- name: Verify 3.5 specific upgrade checks
- hosts: oo_first_master
- roles:
- - { role: lib_openshift }
- tasks:
- - name: Check for invalid namespaces and SDN errors
- oc_objectvalidator:
-
- # What's all this PetSet business about?
- #
- # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are
- # no longer supported. The BETA resource 'StatefulSets' replaces
- # them. We can't migrate clients PetSets to
- # StatefulSets. Additionally, Red Hat has never officially supported
- # these resource types. Sorry users, but if you were using
- # unsupported resources from the Kube documentation then we can't
- # help you at this time.
- #
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
- - name: Check if legacy PetSets exist
- oc_obj:
- state: list
- all_namespaces: true
- kind: petsets
- register: l_do_petsets_exist
-
- - name: Fail on unsupported resource migration 'PetSets'
- fail:
- msg: >
- PetSet objects were detected in your cluster. These are an
- Alpha feature in upstream Kubernetes 1.4 and are not supported
- by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
- feature StatefulSets. Red Hat currently does not offer support
- for either PetSets or StatefulSets.
-
- Automatically migrating PetSets to StatefulSets in OpenShift
- Container Platform (OCP) 3.5 is not supported. See the
- Kubernetes "Upgrading from PetSets to StatefulSets"
- documentation for additional information:
-
- https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
-
- PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
- strongly recommends reading the above referenced documentation
- in its entirety before taking any destructive actions.
-
- If you want to simply remove all PetSets without manually
- migrating to StatefulSets, run this command as a user with
- cluster-admin privileges:
-
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
- when:
- # Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == 0
- # Items do exist in the search results
- - l_do_petsets_exist.results.results.0['items'] | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..d520c6aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -13,110 +13,31 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
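
The l_upgrade_* variables passed to ../pre/config.yml replace the long chain of per-playbook pre-upgrade plays that the removed includes carried; each one names the host group(s) a given pre-upgrade step should run against. A minimal sketch of how such a variable might be consumed inside the shared playbook (../pre/config.yml itself is not part of this diff, so the play below is illustrative only):

    - name: Update repos on upgrade hosts
      hosts: "{{ l_upgrade_repo_hosts }}"
      roles:
        - openshift_repos
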
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..a956fdde5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,117 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
tasks:
- set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index ba6fcc3f8..4febe76ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,101 +15,24 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index bf3b94682..4daa9e490 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,128 +15,45 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-#TODO: Why doesn't this compose using ./upgrade_control_plane rather than
-# ../upgrade_control_plane?
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
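
The controller-cycle play stops the controllers unit on every master before any of them is started again, which is what forces the fresh leader election the play name refers to. Done by hand the sequence is roughly the following (a sketch; the unit name is built from openshift_service_type, e.g. origin-master-controllers on Origin or atomic-openshift-master-controllers on OCP):

    systemctl stop origin-master-controllers    # run on every master first
    systemctl start origin-master-controllers   # only after all masters have stopped the unit
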
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index b91bea617..1750148d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,135 +11,54 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index bc080f9a3..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,99 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..49e691352 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -11,13 +11,15 @@
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
-
+ # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
- {{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ {{ openshift_client_binary }} adm migrate authorization
+ when:
+ - openshift_currently_installed_version is version_compare('3.7','<')
+ - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
- retries: 4
+ retries: 2
delay: 15
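
With this change the authorization migration is gated on the currently installed version rather than the version_gte_3_7 fact, and it can be skipped entirely via openshift_upgrade_pre_authorization_migration_enabled=false. The underlying command can also be run by hand on the first master ahead of the upgrade (assuming openshift_client_binary resolves to oc, which is its usual value):

    oc adm migrate authorization

The task treats a zero exit status as success and otherwise retries, now twice with a 15 second delay instead of four times.
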
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
index 1d4d1919c..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
index 415645be6..415645be6 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
new file mode 100644
index 000000000..0f74e0137
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -0,0 +1,59 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
new file mode 100644
index 000000000..08bfd239f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -0,0 +1,64 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
new file mode 100644
index 000000000..b5f1038fd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -0,0 +1,38 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index db0c8f886..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,6 +1,11 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
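
The added modify_yaml task sets the controller leader-election lock name alongside the existing service-signer key. After both tasks run, the touched fragment of master-config.yaml should look roughly like this (assembled from the keys and values above, not a complete config):

    controllerConfig:
      election:
        lockName: openshift-master-controllers
      serviceServingCert:
        signer:
          certFile: service-signer.crt
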
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..0aea5069d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,55 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..05aa737c6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,65 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..1d1b255c1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,34 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
new file mode 100644
index 000000000..4bd2d87b1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.9 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
deleted file mode 100644
index be2e6a15a..000000000
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Validate node hostnames
- hosts: oo_nodes_to_config
- tasks:
- - name: Query DNS for IP address of {{ openshift.common.hostname }}
- shell:
- getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
- register: lookupip
- changed_when: false
- failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
- The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
-        doesn't resolve to an IP address owned by this host. Please set the
-        openshift_hostname variable to a hostname that, when resolved on the
-        host in question, resolves to an IP address matching an interface on
-        this host. This host will fail liveness checks for pods utilizing hostPorts,
- press ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when:
- - lookupip.stdout != '127.0.0.1'
- - lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
deleted file mode 100644
index eb6b94f33..000000000
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: server_certificates.yml
-
-- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/filter_plugins b/playbooks/common/openshift-etcd/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/lookup_plugins b/playbooks/common/openshift-etcd/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
deleted file mode 100644
index 5eaea5ae8..000000000
--- a/playbooks/common/openshift-etcd/restart.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Restart etcd
- hosts: oo_etcd_to_config
- serial: 1
- tasks:
- - name: restart etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: restarted
- when:
- - not g_etcd_certificates_expired | default(false) | bool
-
-- name: Restart etcd
- hosts: oo_etcd_to_config
- tasks:
- - name: stop etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: stopped
- when:
- - g_etcd_certificates_expired | default(false) | bool
- - name: start etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: started
- when:
- - g_etcd_certificates_expired | default(false) | bool
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-glusterfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-glusterfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
deleted file mode 100644
index 80cf7529e..000000000
--- a/playbooks/common/openshift-glusterfs/registry.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- include: config.yml
-
-- name: Initialize GlusterFS registry PV and PVC vars
- hosts: oo_first_master
- tags: hosted
- tasks:
- - set_fact:
- glusterfs_pv: []
- glusterfs_pvc: []
-
- - set_fact:
- glusterfs_pv:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- storage:
- glusterfs:
- endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
- path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
- readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
- glusterfs_pvc:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- when: openshift.hosted.registry.storage.glusterfs.swap
-
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
-
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- roles:
- - role: openshift_hosted
diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-loadbalancer/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-loadbalancer/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-management/filter_plugins b/playbooks/common/openshift-management/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-management/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-management/library b/playbooks/common/openshift-management/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/common/openshift-management/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index d0a9f11dc..000000000
--- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-// empty file so that the master-config can still point to a file that exists
-// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-master/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-master/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
deleted file mode 100644
index 4f8b758fd..000000000
--- a/playbooks/common/openshift-master/restart_services.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Restart master API
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: restarted
- when: openshift_master_ha | bool
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: restarted
-  # Ignore errors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-nfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-nfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
deleted file mode 100644
index 4f8f98aef..000000000
--- a/playbooks/common/openshift-node/config.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Node Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Node install 'In Progress'
- set_stats:
- data:
- installer_phase_node: "In Progress"
- aggregate: false
-
-- include: certificates.yml
-
-- include: setup.yml
-
-- include: containerized_nodes.yml
-
-- include: configure_nodes.yml
-
-- include: additional_config.yml
-
-- include: manage_node.yml
-
-- include: enable_excluders.yml
-
-- name: Node Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Node install 'Complete'
- set_stats:
- data:
- installer_phase_node: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-node/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
deleted file mode 100644
index 00d167c22..000000000
--- a/playbooks/common/openshift-node/image_prep.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: evaluate the groups
- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: initialize the facts
- include: ../openshift-cluster/initialize_facts.yml
-
-- name: initialize the repositories
- include: ../openshift-cluster/initialize_openshift_repos.yml
-
-- name: run node config setup
- include: setup.yml
-
-- name: run node config
- include: configure_nodes.yml
-
-- name: Re-enable excluders
- include: enable_excluders.yml
diff --git a/playbooks/common/openshift-node/lookup_plugins b/playbooks/common/openshift-node/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-node/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
new file mode 100644
index 000000000..f15aa771f
--- /dev/null
+++ b/playbooks/container-runtime/config.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: private/config.yml
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
new file mode 100644
index 000000000..7fd60743c
--- /dev/null
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+ hosts: oo_all_hosts:!oo_nodes_to_config
+ tasks:
+ - group_by:
+ key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
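Note: the play above uses Ansible's group_by module to split hosts at runtime; every host renders the templated key and joins the resulting group, which later plays can then target directly. A minimal, self-contained sketch of the same technique (the group and variable names below are illustrative, not taken from this repository):

    ---
    - hosts: all
      gather_facts: false
      tasks:
        # Each host joins managed_true or managed_false depending on its
        # 'containerized' inventory variable.
        - group_by:
            key: managed_{{ (containerized | default(False)) | ternary('true', 'false') }}

    - hosts: managed_true
      gather_facts: false
      tasks:
        - debug:
            msg: "{{ inventory_hostname }} is managed as a containerized host"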
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
new file mode 100644
index 000000000..7a49adcf0
--- /dev/null
+++ b/playbooks/container-runtime/private/config.yml
@@ -0,0 +1,25 @@
+---
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+ roles:
+ - role: container_runtime
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: package_docker.yml
+ when:
+ - not openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - import_role:
+ name: container_runtime
+ tasks_from: systemcontainer_docker.yml
+ when:
+ - openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - import_role:
+ name: container_runtime
+ tasks_from: systemcontainer_crio.yml
+ when:
+ - openshift_use_crio | bool
+ - openshift_docker_is_node_or_master | bool
diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles
new file mode 120000
index 000000000..148b13206
--- /dev/null
+++ b/playbooks/container-runtime/private/roles
@@ -0,0 +1 @@
+../../roles/ \ No newline at end of file
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
new file mode 100644
index 000000000..a6d396270
--- /dev/null
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -0,0 +1,18 @@
+---
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+ vars:
+ l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_storage_setup_overlay.yml
+ when:
+ - container_runtime_docker_storage_type|default('') == "overlay2"
+ - openshift_docker_is_node_or_master | bool
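Note: as the in-line comment above says, listing `role: container_runtime` under `roles:` is what brings that role's default variables into the play scope, so the later `import_role ... tasks_from:` call can rely on them. A minimal sketch of the pattern with purely hypothetical names (myrole, myrole_size and extra_setup.yml are placeholders):

    ---
    - hosts: storage_nodes
      roles:
        # Running the role here also loads its defaults/main.yml
        # (for example a hypothetical myrole_size) into play scope.
        - role: myrole
      tasks:
        # The imported task file can now reference myrole_size even though
        # it runs outside the role's own main task list.
        - import_role:
            name: myrole
            tasks_from: extra_setup.yml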
diff --git a/playbooks/container-runtime/setup_storage.yml b/playbooks/container-runtime/setup_storage.yml
new file mode 100644
index 000000000..98e876b2c
--- /dev/null
+++ b/playbooks/container-runtime/setup_storage.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: private/setup_storage.yml
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
new file mode 100644
index 000000000..5efdc486a
--- /dev/null
+++ b/playbooks/deploy_cluster.yml
@@ -0,0 +1,49 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-checks/private/install.yml
+
+- import_playbook: openshift-etcd/private/config.yml
+
+- import_playbook: openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: openshift-master/private/config.yml
+
+- import_playbook: openshift-master/private/additional_config.yml
+
+- import_playbook: openshift-node/private/config.yml
+
+- import_playbook: openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-hosted/private/config.yml
+
+- import_playbook: openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
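Note: deploy_cluster.yml becomes the single public entry point; each optional component is gated by the `when:` toggle shown above. A hypothetical invocation, assuming an inventory file named `hosts` (the path and the chosen toggles are illustrative only):

    $ ansible-playbook -i hosts playbooks/deploy_cluster.yml \
        -e openshift_metrics_install_metrics=true \
        -e openshift_logging_install_logging=true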
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
deleted file mode 100644
index a3d1d46a6..000000000
--- a/playbooks/gcp/openshift-cluster/provision.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Ensure all cloud resources necessary for the cluster, including instances, have been started
- hosts: localhost
- connection: local
- gather_facts: no
- tasks:
-
- - name: provision a GCP cluster in the specified project
- include_role:
- name: openshift_gcp
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/std_include.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
new file mode 100644
index 000000000..b6edf9961
--- /dev/null
+++ b/playbooks/gcp/provision.yml
@@ -0,0 +1,13 @@
+---
+- name: Ensure all cloud resources necessary for the cluster, including instances, have been started
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+
+ - name: provision a GCP cluster in the specified project
+ import_role:
+ name: openshift_gcp
+
+- name: run the cluster deploy
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
new file mode 100644
index 000000000..15b3dd492
--- /dev/null
+++ b/playbooks/init/base_packages.yml
@@ -0,0 +1,37 @@
+---
+- name: Install packages necessary for installer
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ tasks:
+ - when:
+ - not openshift_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - yum-utils
+ register: result
+ until: result is succeeded
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+ register: result
+ until: result is succeeded
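Note: both package tasks register their result and loop with `until: result is succeeded`, the usual idiom for riding out transient repository or network failures. A minimal stand-alone sketch of that retry idiom (the package name, retries and delay are illustrative):

    ---
    - hosts: all
      tasks:
        - name: Install a package, retrying transient repo failures
          package:
            name: iproute
            state: present
          register: result
          until: result is succeeded
          retries: 3
          delay: 5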
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 78b552279..8087f6ffc 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -5,6 +5,9 @@
become: no
gather_facts: no
tasks:
+ - name: Load group name mapping variables
+ include_vars: vars/cluster_hosts.yml
+
- name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
fail:
msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
@@ -43,14 +46,9 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
+ Running etcd as an embedded service is no longer supported.
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
new file mode 100644
index 000000000..094db845d
--- /dev/null
+++ b/playbooks/init/facts.yml
@@ -0,0 +1,104 @@
+---
+- name: Ensure that all non-node hosts are accessible
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
+ any_errors_fatal: true
+ tasks:
+
+- name: Initialize host facts
+ hosts: oo_all_hosts
+ tasks:
+ - name: load openshift_facts module
+ import_role:
+ name: openshift_facts
+
+ # TODO: Should this role be refactored into health_checks??
+ - name: Run openshift_sanitize_inventory to set variables
+ import_role:
+ name: openshift_sanitize_inventory
+
+ - name: Detecting Operating System from ostree_booted
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+ # TODO(michaelgugino) remove this line once CI is updated.
+ - name: set openshift_deployment_type if unset
+ set_fact:
+ openshift_deployment_type: "{{ deployment_type }}"
+ when:
+ - openshift_deployment_type is undefined
+ - deployment_type is defined
+
+ - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
+ set_fact:
+ openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
+ openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ # Fail as early as possible if Atomic and old version of Docker
+ - when:
+ - openshift_is_atomic | bool
+ block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+ # NOTE: the double quotes cannot be removed at this level, otherwise the docker command will fail
+ # NOTE: When ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - name: assert atomic host docker version is 1.12 or later
+ assert:
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+ - name: Gather Cluster facts
+ openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+
+- name: Initialize special first-master variables
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - set_fact:
+ # We need to set up openshift_client_binary here for special uses of delegate_to in
+ # later roles and plays.
+ first_master_client_binary: "{{ openshift_client_binary }}"
+ # Some roles may require this to be set for the first master
+ openshift_client_binary: "{{ openshift_client_binary }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
new file mode 100644
index 000000000..20457e508
--- /dev/null
+++ b/playbooks/init/main.yml
@@ -0,0 +1,36 @@
+---
+- name: Initialization Checkpoint Start
+ hosts: all
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_initialize:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: evaluate_groups.yml
+
+- import_playbook: facts.yml
+
+- import_playbook: sanity_checks.yml
+ when: not (skip_sanity_checks | default(False))
+
+- import_playbook: version.yml
+ when: not (skip_verison | default(False))
+
+- name: Initialization Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_initialize:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
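Note: this is the checkpoint pattern used by every phase: a play records 'In Progress' with set_stats before the work and 'Complete' afterwards, and the installer_checkpoint role pulled in by the first play appears to provide the callback that reports the collected phases at the end of the run. A compact sketch of the same pattern for a hypothetical phase key (installer_phase_example is a placeholder):

    ---
    - name: Example Phase Checkpoint Start
      hosts: all
      gather_facts: false
      tasks:
        - name: Set example phase 'In Progress'
          run_once: true
          set_stats:
            data:
              installer_phase_example:
                status: "In Progress"
                start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

    # ... the phase's real plays would be imported here ...

    - name: Example Phase Checkpoint End
      hosts: all
      gather_facts: false
      tasks:
        - name: Set example phase 'Complete'
          run_once: true
          set_stats:
            data:
              installer_phase_example:
                status: "Complete"
                end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"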
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
new file mode 100644
index 000000000..667f38ddd
--- /dev/null
+++ b/playbooks/init/repos.yml
@@ -0,0 +1,16 @@
+---
+- name: Setup yum repositories for all hosts
+ hosts: oo_all_hosts
+ gather_facts: no
+ tasks:
+ - name: subscribe instances to Red Hat Subscription Manager
+ import_role:
+ name: rhel_subscribe
+ when:
+ - ansible_distribution == 'RedHat'
+ - openshift_deployment_type == 'openshift-enterprise'
+ - rhsub_user is defined
+ - rhsub_pass is defined
+ - name: initialize openshift repos
+ import_role:
+ name: openshift_repos
diff --git a/playbooks/init/roles b/playbooks/init/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/init/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
new file mode 100644
index 000000000..52bcf42c0
--- /dev/null
+++ b/playbooks/init/sanity_checks.yml
@@ -0,0 +1,15 @@
+---
+- name: Verify Requirements
+ hosts: oo_first_master
+ roles:
+ - role: lib_utils
+ tasks:
+ # sanity_checks is a custom action plugin defined in lib_utils.
+ # This module will loop through all the hostvars for each host
+ # specified in check_hosts.
+ # Since sanity_checks is an action_plugin, it executes on the control host.
+ # Thus, sanity_checks cannot gather new information about any hosts.
+ - name: Run variable sanity checks
+ sanity_checks:
+ check_hosts: "{{ groups['oo_all_hosts'] }}"
+ run_once: True
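Note: the sanity checks run once, on the control host, against the hostvars of every host in oo_all_hosts, and init/main.yml only imports this playbook when skip_sanity_checks is unset. A hypothetical way to bypass them, assuming an inventory file named `hosts` (the path is illustrative):

    $ ansible-playbook -i hosts playbooks/deploy_cluster.yml -e skip_sanity_checks=true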
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
new file mode 100644
index 000000000..86e0b2416
--- /dev/null
+++ b/playbooks/init/validate_hostnames.yml
@@ -0,0 +1,43 @@
+---
+- name: Validate node hostnames
+ hosts: oo_nodes_to_config
+ any_errors_fatal: true
+ tasks:
+ - name: Query DNS for IP address of {{ openshift.common.hostname }}
+ shell:
+ getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
+ register: lookupip
+ changed_when: false
+ failed_when: false
+
+ - name: Validate openshift_hostname when defined
+ fail:
+ msg: >
+ The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+ doesn't resolve to an IP address owned by this host. Please set
+ the openshift_hostname variable to a hostname that resolves, on this host,
+ to an IP address assigned to one of its interfaces.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_hostname={{ openshift_hostname }}
+ This check can be overridden by setting openshift_hostname_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+ when:
+ - lookupip.stdout != '127.0.0.1'
+ - lookupip.stdout not in ansible_all_ipv4_addresses
+ - openshift_hostname_check | default(true)
+
+ - name: Validate openshift_ip exists on node when defined
+ fail:
+ msg: >
+ The IP address {{ openshift_ip }} does not exist on {{ ansible_nodename }}.
+ Please set the openshift_ip variable to an IP address of this node.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_ip={{ openshift_ip }}
+ This check can be overridden by setting openshift_ip_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+ when:
+ - openshift_ip is defined
+ - openshift_ip not in ansible_all_ipv4_addresses
+ - openshift_ip_check | default(true)
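Note: both failure messages above name their own escape hatches, openshift_hostname_check and openshift_ip_check. A hypothetical inventory fragment that disables the two checks for a node whose DNS records are intentionally non-standard (the hostname and group layout are illustrative):

    [nodes]
    node01.example.com openshift_hostname_check=false openshift_ip_check=false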
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/init/vars/cluster_hosts.yml
index e807ac004..e807ac004 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/init/vars/cluster_hosts.yml
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/init/version.yml
index e6400ea61..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/init/version.yml
@@ -1,15 +1,4 @@
---
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/openshift-checks/README.md
index b26e7d7ed..0b7ea91ff 100644
--- a/playbooks/byo/openshift-checks/README.md
+++ b/playbooks/openshift-checks/README.md
@@ -47,19 +47,19 @@ against your inventory file. Here is the step-by-step:
3. Run the appropriate playbook:
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/pre-install.yml
```
or
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/health.yml
```
or
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/certificate_expiry/default.yaml -v
```
### The adhoc playbook
@@ -72,19 +72,19 @@ using the `-e` flag.
For example, to run the `docker_storage` check:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
```
To run more checks, use a comma-separated list of check names:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
```
To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=@preflight
```
It is valid to specify multiple check tags and individual check names together
@@ -94,7 +94,7 @@ To list all of the available checks and tags, run the adhoc playbook without
setting the `openshift_checks` variable:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml
```
## Running in a container
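Note: as the README states, check group tags and individual check names may be combined in one comma-separated list, for example (inventory path illustrative):

    $ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=@preflight,docker_storage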
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml
index 226bed732..414090733 100644
--- a/playbooks/byo/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/adhoc.yml
@@ -1,6 +1,6 @@
---
# NOTE: ideally this would be just part of a single play in
-# common/openshift-checks/adhoc.yml that lists the existing checks when
+# private/adhoc.yml that lists the existing checks when
# openshift_checks is not set or run the requested checks. However, to actually
# run the checks we need to have the included dependencies to run first and that
# takes time. To speed up listing checks, we use this separate play that runs
@@ -20,8 +20,6 @@
action: openshift_health_check
when: openshift_checks is undefined or not openshift_checks
-- include: ../openshift-cluster/initialize_groups.yml
+- import_playbook: ../init/main.yml
-- include: ../../common/openshift-cluster/std_include.yml
-
-- include: ../../common/openshift-checks/adhoc.yml
+- import_playbook: private/adhoc.yml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml b/playbooks/openshift-checks/certificate_expiry/default.yaml
index 630135cae..630135cae 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/default.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
index 378d1f154..378d1f154 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
index ae41c7c14..ae41c7c14 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
index d80cb6ff4..d80cb6ff4 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
index 2189455b7..2189455b7 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
index 87a0f3be4..87a0f3be4 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
index 960457c4b..960457c4b 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/openshift-checks/certificate_expiry/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-checks/roles
+++ b/playbooks/openshift-checks/certificate_expiry/roles
diff --git a/playbooks/openshift-checks/health.yml b/playbooks/openshift-checks/health.yml
new file mode 100644
index 000000000..caac06626
--- /dev/null
+++ b/playbooks/openshift-checks/health.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/health.yml
diff --git a/playbooks/openshift-checks/pre-install.yml b/playbooks/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..4511f6e3c
--- /dev/null
+++ b/playbooks/openshift-checks/pre-install.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/pre-install.yml
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/openshift-checks/private/adhoc.yml
index dfcef8435..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/private/adhoc.yml
@@ -1,12 +1,13 @@
---
-- name: OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: adhoc
post_tasks:
- - name: Run health checks
+ - name: Run health checks (adhoc)
action: openshift_health_check
args:
checks: '{{ openshift_checks | default([]) }}'
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/openshift-checks/private/health.yml
index 21ea785ef..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/openshift-checks/private/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/openshift-checks/private/install.yml b/playbooks/openshift-checks/private/install.yml
new file mode 100644
index 000000000..93cf6c359
--- /dev/null
+++ b/playbooks/openshift-checks/private/install.yml
@@ -0,0 +1,51 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_health:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_health:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/openshift-checks/private/pre-install.yml
index 88e6f9120..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/openshift-checks/private/pre-install.yml
@@ -1,11 +1,13 @@
---
-- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/byo/openshift-etcd/roles b/playbooks/openshift-checks/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-etcd/roles
+++ b/playbooks/openshift-checks/private/roles
diff --git a/playbooks/openshift-checks/roles b/playbooks/openshift-checks/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/openshift-checks/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/playbooks/openshift-etcd/certificates.yml b/playbooks/openshift-etcd/certificates.yml
new file mode 100644
index 000000000..c06e3b575
--- /dev/null
+++ b/playbooks/openshift-etcd/certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/ca.yml
+
+- import_playbook: private/certificates.yml
diff --git a/playbooks/openshift-etcd/config.yml b/playbooks/openshift-etcd/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-etcd/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-etcd/embedded2external.yml b/playbooks/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..7d090fa9b
--- /dev/null
+++ b/playbooks/openshift-etcd/embedded2external.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/embedded2external.yml
diff --git a/playbooks/openshift-etcd/migrate.yml b/playbooks/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..0340b74a5
--- /dev/null
+++ b/playbooks/openshift-etcd/migrate.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/migrate.yml
diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index ac5543be9..72c39d546 100644
--- a/playbooks/common/openshift-etcd/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -2,11 +2,12 @@
- name: Generate new etcd CA
hosts: oo_first_etcd
roles:
+ - role: openshift_clock
- role: openshift_etcd_facts
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: ca
+ tasks_from: ca.yml
vars:
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index d738c8207..2f9bef799 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -3,17 +3,17 @@
hosts: oo_first_etcd
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup_generated_certificates
- - include_role:
+ tasks_from: backup_generated_certificates.yml
+ - import_role:
name: etcd
- tasks_from: remove_generated_certificates
+ tasks_from: remove_generated_certificates.yml
- name: Backup deployed etcd certificates
hosts: oo_etcd_to_config
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup_server_certificates
+ tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/certificates.yml b/playbooks/openshift-etcd/private/certificates.yml
new file mode 100644
index 000000000..7c45938c1
--- /dev/null
+++ b/playbooks/openshift-etcd/private/certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: server_certificates.yml
+
+- import_playbook: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/openshift-etcd/private/config.yml
index 48d46bbb0..35407969e 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -1,36 +1,39 @@
---
- name: etcd Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "In Progress"
- aggregate: false
+ installer_phase_etcd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- include: ca.yml
+- import_playbook: ca.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
+ - role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "Complete"
- aggregate: false
+ installer_phase_etcd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index 9264f3c32..b71eaacd0 100644
--- a/playbooks/common/openshift-etcd/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -18,11 +18,11 @@
- role: openshift_facts
tasks:
- name: Check the master API is ready
- include_role:
+ import_role:
name: openshift_master
- tasks_from: check_master_api_is_ready
+ tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
@@ -31,60 +31,54 @@
name: "{{ master_service }}"
state: stopped
# 2. backup embedded etcd
- # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- - include_role:
+ # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285
+ - import_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup.archive
+ tasks_from: backup.archive.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
# 3. deploy certificates (for etcd and master)
-- include: ca.yml
+- import_playbook: ca.yml
-- include: server_certificates.yml
+- import_playbook: server_certificates.yml
- name: Backup etcd client certificates for master host
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup_master_etcd_certificates
+ tasks_from: backup_master_etcd_certificates.yml
- name: Redeploy master etcd certificates
- include: master_etcd_certificates.yml
+ import_playbook: master_etcd_certificates.yml
vars:
etcd_certificates_redeploy: "{{ true }}"
# 4. deploy external etcd
-- include: ../openshift-etcd/config.yml
+- import_playbook: config.yml
# 5. stop external etcd
- name: Cleanse etcd
hosts: oo_etcd_to_config[0]
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: disable_etcd
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- - include_role:
+ tasks_from: disable_etcd.yml
+ - import_role:
name: etcd
- tasks_from: clean_data
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks_from: clean_data.yml
# 6. copy the embedded etcd backup to the external host
# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
@@ -97,22 +91,20 @@
changed_when: False
become: no
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup.fetch
+ tasks_from: backup.fetch.yml
vars:
- r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
delegate_to: "{{ groups.oo_first_master[0] }}"
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup.copy
+ tasks_from: backup.copy.yml
vars:
- r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
@@ -130,19 +122,17 @@
- name: Force new etcd cluster
hosts: oo_etcd_to_config[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup.unarchive
+ tasks_from: backup.unarchive.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup.force_new_cluster
+ tasks_from: backup.force_new_cluster.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
etcd_peer: "{{ openshift.common.ip }}"
@@ -153,12 +143,12 @@
- name: Configure master to use external etcd
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_master
- tasks_from: configure_external_etcd
+ tasks_from: configure_external_etcd.yml
vars:
etcd_peer_url_scheme: "https"
- etcd_ip: "{{ openshift.common.ip }}"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
etcd_peer_port: 2379
# 9. start the master
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/openshift-etcd/private/master_etcd_certificates.yml
index 0a25aac57..d98470db2 100644
--- a/playbooks/common/openshift-etcd/master_etcd_certificates.yml
+++ b/playbooks/openshift-etcd/private/master_etcd_certificates.yml
@@ -10,5 +10,4 @@
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 31362f2f6..0a2ac7f1a 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -15,11 +15,10 @@
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: migrate.pre_check
+ tasks_from: migrate.pre_check.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
@@ -28,8 +27,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
@@ -44,13 +43,11 @@
roles:
- role: openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -60,8 +57,8 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
- fail:
@@ -73,21 +70,18 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: disable_etcd
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks_from: disable_etcd.yml
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: migrate
+ tasks_from: migrate.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -96,11 +90,10 @@
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -117,7 +110,7 @@
set_fact:
r_etcd_migrate_success: true
-- include: ./scaleup.yml
+- import_playbook: scaleup.yml
- name: Gate on etcd migration
hosts: oo_masters_to_config
@@ -125,17 +118,17 @@
tasks:
- set_fact:
etcd_migration_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- set_fact:
etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
- name: Add TTLs on the first master
hosts: oo_first_master[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: migrate.add_ttls
+ tasks_from: migrate.add_ttls.yml
vars:
etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
etcd_url_scheme: "https"
@@ -145,9 +138,9 @@
- name: Configure masters if etcd data migration is succesfull
hosts: oo_masters_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: migrate.configure_master
+ tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 044875d1c..7b0d99255 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -14,14 +14,14 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup_ca_certificates
- - include_role:
+ tasks_from: backup_ca_certificates.yml
+ - import_role:
name: etcd
- tasks_from: remove_ca_certificates
+ tasks_from: remove_ca_certificates.yml
-- include: ../../openshift-etcd/ca.yml
+- import_playbook: ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -37,30 +37,28 @@
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: distribute_ca
+ tasks_from: distribute_ca.yml
vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-- include: ../../openshift-etcd/restart.yml
+- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
when: ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: retrieve_ca_certificates
+ tasks_from: retrieve_ca_certificates.yml
vars:
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Distribute etcd CA to masters
hosts: oo_masters_to_config
@@ -84,20 +82,20 @@
state: absent
changed_when: false
-- include: ../../openshift-master/restart.yml
+- import_playbook: ../../openshift-master/private/restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml
index 4a9fbf7eb..1c8eb27ac 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
+++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
- name: Check cert expirys
- hosts: "{{ g_check_expiry_hosts }}"
+ hosts: oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -10,3 +10,9 @@
# this playbook. Service restarts will be skipped if any
# certificates were previously expired.
- role: openshift_certificate_expiry
+
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ etcd_certificates_redeploy: true
diff --git a/playbooks/openshift-etcd/private/restart.yml b/playbooks/openshift-etcd/private/restart.yml
new file mode 100644
index 000000000..a2a53651b
--- /dev/null
+++ b/playbooks/openshift-etcd/private/restart.yml
@@ -0,0 +1,19 @@
+---
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ serial: 1
+ tasks:
+ - import_role:
+ name: etcd
+ tasks_from: restart.yml
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ tasks:
+ - import_role:
+ name: etcd
+ tasks_from: restart.yml
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/openshift-etcd/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-glusterfs/roles
+++ b/playbooks/openshift-etcd/private/roles
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index 20061366c..8a9811a25 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -30,13 +30,12 @@
retries: 3
delay: 10
until: etcd_add_check.rc == 0
- - include_role:
+ - import_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
roles:
- role: os_firewall
when: etcd_add_check.rc == 0
@@ -48,7 +47,6 @@
etcd_initial_cluster_state: "existing"
etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
when: etcd_add_check.rc == 0
post_tasks:
@@ -71,13 +69,13 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
roles:
- role: openshift_master_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_master
- tasks_from: update_etcd_client_urls
+ tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 10e06747b..ebcf4a5ff 100644
--- a/playbooks/common/openshift-etcd/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -5,11 +5,10 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index d086cad00..97b6edba5 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -4,13 +4,11 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -20,8 +18,8 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_hosts_to_backup)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index e5e895775..f9e50e748 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -1,18 +1,17 @@
---
# INPUT etcd_upgrade_version
# INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
etcd_peer: "{{ openshift.common.hostname }}"
when:
- - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- - openshift.common.is_containerized | bool
+ - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
+ - openshift_is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 5b8ba3bb2..8997680f9 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -6,7 +6,7 @@
# available in the repos. So for Fedora we'll simply skip this, sorry.
- name: Backup etcd before upgrading anything
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
@@ -14,16 +14,16 @@
- name: Drop etcdctl profiles
hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: drop_etcdctl
+ tasks_from: drop_etcdctl.yml
- name: Perform etcd upgrade
- include: ./upgrade.yml
+ import_playbook: upgrade_step.yml
when: openshift_etcd_upgrade | default(true) | bool
- name: Backup etcd
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
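
Note: as the hunk above shows, the pre- and post-upgrade backup plays and the upgrade step itself are gated on openshift_etcd_backup and openshift_etcd_upgrade, both defaulting to true. A minimal sketch of overriding them, expressed as YAML inventory variables (placement in group_vars is illustrative, not prescribed by the diff):

    # group_vars/OSEv3.yml (illustrative location; any inventory var source works)
    openshift_etcd_backup: false    # skip both the pre- and post-upgrade backup plays
    openshift_etcd_upgrade: true    # default; still runs upgrade_step.yml
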
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index a2a26bad4..e78cc5826 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -1,19 +1,18 @@
---
# INPUT etcd_upgrade_version
# INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
- tasks_from: upgrade_rpm
+ tasks_from: upgrade_rpm.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
etcd_peer: "{{ openshift.common.hostname }}"
when:
- - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+ - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
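
Note: both member-upgrade playbooks also switch the version guard from the filter spelling (`| version_compare(...)`) to the Jinja2 test spelling (`is version_compare(...)`), which newer Ansible releases expect since the filter form is deprecated. A minimal sketch of the two spellings, using a hypothetical current_version variable:

    # deprecated filter form (what the playbooks used before):
    when: current_version | default('99') | version_compare('3.2', '<')
    # test form used in the new playbooks:
    when: current_version | default('99') is version_compare('3.2', '<')
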
diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
new file mode 100644
index 000000000..6aec838d4
--- /dev/null
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -0,0 +1,64 @@
+---
+- name: Determine etcd version
+ hosts: oo_etcd_hosts_to_upgrade
+ tasks:
+ - import_role:
+ name: etcd
+ tasks_from: version_detect.yml
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '2.1'
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '2.2'
+
+- import_playbook: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '2.2.5'
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '2.3'
+
+- import_playbook: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '2.3.7'
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.0'
+
+- import_playbook: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.0.15'
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.1'
+
+- import_playbook: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.1.3'
+
+- import_playbook: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.2'
+
+- import_playbook: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.2.7'
+
+- name: Upgrade fedora to latest
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - import_role:
+ name: etcd
+ tasks_from: upgrade_image.yml
+ vars:
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - ansible_distribution == 'Fedora'
+ - not openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml
new file mode 100644
index 000000000..769d694ba
--- /dev/null
+++ b/playbooks/openshift-etcd/redeploy-ca.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-ca.yml
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
new file mode 100644
index 000000000..8ea1994f7
--- /dev/null
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -0,0 +1,10 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
+
+- import_playbook: ../openshift-master/private/restart.yml
diff --git a/playbooks/openshift-etcd/restart.yml b/playbooks/openshift-etcd/restart.yml
new file mode 100644
index 000000000..041c1384d
--- /dev/null
+++ b/playbooks/openshift-etcd/restart.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
new file mode 100644
index 000000000..7e9ab6834
--- /dev/null
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/scaleup.yml
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
new file mode 100644
index 000000000..ccc797527
--- /dev/null
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..19c381490 100644
--- a/playbooks/byo/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
devices but you must specify the following variables in `[OSEv3:vars]`:
* `openshift_storage_glusterfs_is_missing=False`
* `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+ in the `nodes` group. They must also already be configured as OpenShift
+ nodes before this playbook runs.
By default, pods for a native GlusterFS cluster will be created in the
`default` namespace. To change this, specify
@@ -60,7 +63,7 @@ glusterfs
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
[masters]
master
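
Note: two inventory details change in this README: the cluster-wide variable is now openshift_deployment_type rather than deployment_type, and an externally managed GlusterFS cluster must declare that its storage and heketi pieces already exist. A minimal sketch of those variables as YAML group_vars (equivalent to the INI [OSEv3:vars] block shown in the README; values are illustrative):

    openshift_deployment_type: origin
    # only for an external, pre-existing GlusterFS cluster:
    openshift_storage_glusterfs_is_missing: False
    openshift_storage_glusterfs_heketi_is_missing: False
    # natively hosted GlusterFS target hosts must also be listed in the
    # nodes group and already be configured as OpenShift nodes.
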
diff --git a/playbooks/openshift-glusterfs/config.yml b/playbooks/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-glusterfs/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/openshift-glusterfs/private/config.yml
index 80cda9e21..9a5bc143d 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/openshift-glusterfs/private/config.yml
@@ -1,46 +1,60 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "In Progress"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
+ - import_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - import_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
tasks:
- name: setup glusterfs
- include_role:
+ import_role:
name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "Complete"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
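
Note: the checkpoint plays above replace the old string-plus-aggregate form with a per-phase mapping carrying a status and start/end timestamps, set once (run_once) against all hosts. After a run, the recorded stats for this phase take roughly the following shape (timestamps invented for illustration):

    installer_phase_glusterfs:
      status: "Complete"
      start: "20180101120000Z"
      end: "20180101120300Z"
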
diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml
new file mode 100644
index 000000000..917b729f9
--- /dev/null
+++ b/playbooks/openshift-glusterfs/private/registry.yml
@@ -0,0 +1,20 @@
+---
+- import_playbook: config.yml
+
+- name: Create persistent volumes
+ hosts: oo_first_master
+ roles:
+ - role: openshift_persistent_volumes
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_hosted
diff --git a/playbooks/common/openshift-etcd/roles b/playbooks/openshift-glusterfs/private/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-etcd/roles
+++ b/playbooks/openshift-glusterfs/private/roles
diff --git a/playbooks/openshift-glusterfs/registry.yml b/playbooks/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..5e3b18536
--- /dev/null
+++ b/playbooks/openshift-glusterfs/registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/registry.yml
diff --git a/playbooks/openshift-hosted/config.yml b/playbooks/openshift-hosted/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-hosted/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml
new file mode 100644
index 000000000..d6529425b
--- /dev/null
+++ b/playbooks/openshift-hosted/private/cockpit-ui.yml
@@ -0,0 +1,8 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+ hosts: oo_first_master
+ roles:
+ - role: cockpit-ui
+ when:
+ - openshift_hosted_manage_registry | default(true) | bool
+ - not (openshift_docker_hosted_registry_insecure | default(false)) | bool
diff --git a/playbooks/openshift-hosted/private/config.yml b/playbooks/openshift-hosted/private/config.yml
new file mode 100644
index 000000000..4e7b98da2
--- /dev/null
+++ b/playbooks/openshift-hosted/private/config.yml
@@ -0,0 +1,45 @@
+---
+- name: Hosted Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_hosted:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: create_persistent_volumes.yml
+
+- import_playbook: openshift_default_storage_class.yml
+
+- import_playbook: openshift_hosted_create_projects.yml
+
+- import_playbook: openshift_hosted_router.yml
+
+- import_playbook: openshift_hosted_registry.yml
+
+- import_playbook: openshift_hosted_wait_for_pods.yml
+
+- import_playbook: openshift_hosted_registry_storage.yml
+
+- import_playbook: cockpit-ui.yml
+
+- import_playbook: install_docker_gc.yml
+ when:
+ - openshift_use_crio | default(False) | bool
+ - openshift_crio_enable_docker_gc | default(False) | bool
+
+- name: Hosted Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_hosted:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
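
Note: the hosted install is now a straight chain of import_playbook steps, and the docker-gc step only runs when both CRI-O flags are true; both default to false per the `default(False)` filters above. A minimal sketch of enabling it via inventory variables (YAML group_vars form, placement illustrative):

    openshift_use_crio: True
    openshift_crio_enable_docker_gc: True
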
diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
new file mode 100644
index 000000000..41ae2eb69
--- /dev/null
+++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
@@ -0,0 +1,5 @@
+---
+- name: Create Hosted Resources - persistent volumes
+ hosts: oo_first_master
+ roles:
+ - role: openshift_persistent_volumes
diff --git a/playbooks/openshift-hosted/private/install_docker_gc.yml b/playbooks/openshift-hosted/private/install_docker_gc.yml
new file mode 100644
index 000000000..03eb542d3
--- /dev/null
+++ b/playbooks/openshift-hosted/private/install_docker_gc.yml
@@ -0,0 +1,7 @@
+---
+- name: Install docker gc
+ hosts: oo_first_master
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: openshift_docker_gc
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 4b4f19690..62fe0dd60 100644
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,4 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
index d5ca5185c..b09432da2 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
@@ -2,6 +2,6 @@
- name: Create Hosted Resources - openshift projects
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
index 2a91a827c..659c95eda 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: registry.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
new file mode 100644
index 000000000..cfc47c9b2
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
@@ -0,0 +1,13 @@
+---
+# This playbook configures storage for the hosted registry once the
+# registry deployment exists; it runs after the router and registry
+# pod waits in the hosted config chain.
+- name: Configure hosted registry storage
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: registry_storage.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
index bcb5a34a4..353377189 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted_router.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: router.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
new file mode 100644
index 000000000..1f6868c2a
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
@@ -0,0 +1,26 @@
+---
+# This playbook waits for registry and router pods after both have been
+# created. It is intended to allow the tasks of deploying both to complete
+# before polling to save time.
+- name: Poll for hosted pod deployments
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
+
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index afd5463b2..b817221b8 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -17,7 +17,7 @@
- name: Determine if docker-registry exists
command: >
- {{ openshift.common.client_binary }} get dc/docker-registry -o json
+ {{ openshift_client_binary }} get dc/docker-registry -o json
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
register: l_docker_registry_dc
@@ -26,11 +26,11 @@
- set_fact:
docker_registry_env_vars: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
+ | lib_utils_oo_collect('name'))
| default([]) }}"
docker_registry_secrets: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
+ | lib_utils_oo_collect('secret')
+ | lib_utils_oo_collect('secretName'))
| default([]) }}"
changed_when: false
when: l_docker_registry_dc.rc == 0
@@ -38,7 +38,7 @@
# Replace dc/docker-registry environment variable certificate data if set.
- name: Update docker-registry environment variables
shell: >
- {{ openshift.common.client_binary }} env dc/docker-registry
+ {{ openshift_client_binary }} env dc/docker-registry
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
@@ -62,7 +62,7 @@
- name: Generate registry certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
+ {{ openshift_client_binary }} adm ca create-server-cert
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
@@ -70,9 +70,7 @@
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
- {% endif %}
- name: Update registry certificates secret
oc_secret:
@@ -90,7 +88,7 @@
- name: Redeploy docker registry
command: >
- {{ openshift.common.client_binary }} deploy dc/docker-registry
+ {{ openshift_client_binary }} deploy dc/docker-registry
--latest
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 2116c745c..0df748f47 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -17,7 +17,7 @@
- name: Determine if router exists
command: >
- {{ openshift.common.client_binary }} get dc/router -o json
+ {{ openshift_client_binary }} get dc/router -o json
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_dc
@@ -26,7 +26,7 @@
- name: Determine if router service exists
command: >
- {{ openshift.common.client_binary }} get svc/router -o json
+ {{ openshift_client_binary }} get svc/router -o json
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_svc
@@ -36,11 +36,11 @@
- name: Collect router environment variables and secrets
set_fact:
router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
+ | lib_utils_oo_collect('name'))
| default([]) }}"
router_secrets: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
+ | lib_utils_oo_collect('secret')
+ | lib_utils_oo_collect('secretName'))
| default([]) }}"
changed_when: false
when: l_router_dc.rc == 0
@@ -52,7 +52,7 @@
- name: Update router environment variables
shell: >
- {{ openshift.common.client_binary }} env dc/router
+ {{ openshift_client_binary }} env dc/router
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
@@ -78,7 +78,7 @@
- name: Remove router service annotations
command: >
- {{ openshift.common.client_binary }} annotate service/router
+ {{ openshift_client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name-
service.alpha.openshift.io/serving-cert-signed-by-
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
@@ -86,7 +86,7 @@
- name: Add serving-cert-secret annotation to router service
command: >
- {{ openshift.common.client_binary }} annotate service/router
+ {{ openshift_client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
@@ -115,7 +115,7 @@
- ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: main
vars:
@@ -129,7 +129,7 @@
- name: Redeploy router
command: >
- {{ openshift.common.client_binary }} deploy dc/router
+ {{ openshift_client_binary }} deploy dc/router
--latest
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/openshift-hosted/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-master/roles
+++ b/playbooks/openshift-hosted/private/roles
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
new file mode 100644
index 000000000..518a1d624
--- /dev/null
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
new file mode 100644
index 000000000..a74dd8c79
--- /dev/null
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/config.yml b/playbooks/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-loadbalancer/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 2a703cb61..54c8483c8 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -1,33 +1,26 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "In Progress"
- aggregate: false
-
-- name: Configure firewall and docker for load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
- - role: openshift_docker
- when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+ installer_phase_loadbalancer:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']),
+ | lib_utils_oo_loadbalancer_frontends(hostvars | lib_utils_oo_select_keys(groups['oo_masters']),
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_frontends | default([]) }}"
openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']),
+ | lib_utils_oo_loadbalancer_backends(hostvars | lib_utils_oo_select_keys(groups['oo_masters']),
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
@@ -37,11 +30,13 @@
- role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "Complete"
- aggregate: false
+ installer_phase_loadbalancer:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/byo/openshift-node/roles b/playbooks/openshift-loadbalancer/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-node/roles
+++ b/playbooks/openshift-loadbalancer/private/roles
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
new file mode 100644
index 000000000..83d330284
--- /dev/null
+++ b/playbooks/openshift-logging/config.yml
@@ -0,0 +1,9 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# Hosted logging. See inventory/hosts.example for the
+# currently supported method.
+#
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml
index 529a4c939..d6b26647c 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -1,33 +1,38 @@
---
- name: Logging Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "In Progress"
- aggregate: false
+ installer_phase_logging:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
- openshift_logging
+# TODO: Remove when master config property is removed
- name: Update Master configs
hosts: oo_masters:!oo_first_master
tasks:
- block:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "Complete"
- aggregate: false
+ installer_phase_logging:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-checks/roles b/playbooks/openshift-logging/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-checks/roles
+++ b/playbooks/openshift-logging/private/roles
diff --git a/playbooks/openshift-management/add_container_provider.yml b/playbooks/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..e0970f525
--- /dev/null
+++ b/playbooks/openshift-management/add_container_provider.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/add_container_provider.yml
diff --git a/playbooks/openshift-management/add_many_container_providers.yml b/playbooks/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..45231a495
--- /dev/null
+++ b/playbooks/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Ensure the container provider configuration is defined
+ assert:
+ that: container_providers_config is defined
+ msg: |
+ Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+ - name: Include providers/management configuration
+ include_vars:
+ file: "{{ container_providers_config }}"
+
+ - name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_server['hostname'] }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ management_server['user'] }}"
+ password: "{{ management_server['password'] }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body: "{{ item }}"
+ failed_when: false
+ with_items: "{{ container_providers }}"
+ register: results
+
+ # Include openshift_management for access to filter_plugins.
+ - import_role:
+ name: openshift_management
+ tasks_from: noop
+
+ - name: print each result
+ debug:
+ msg: "{{ results.results | oo_filter_container_providers }}"
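
Note: this playbook is driven entirely by the vars file passed with `-e container_providers_config=...`: it must define a `management_server` mapping (hostname, user, password used for the POST to /api/providers) and a `container_providers` list whose items are sent as request bodies. A minimal sketch of such a file, with every value invented and the provider body keys taken from the linked ManageIQ providers documentation rather than from this diff:

    # container_providers_config file (illustrative)
    management_server:
      hostname: mgmt.example.com
      user: admin
      password: changeme
    container_providers:
      # each entry is POSTed verbatim to /api/providers; see the ManageIQ docs
      # linked in the playbook for the full body format
      - name: my-openshift-cluster
        type: "ManageIQ::Providers::Openshift::ContainerManager"
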
diff --git a/playbooks/openshift-management/config.yml b/playbooks/openshift-management/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-management/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-management/private/add_container_provider.yml b/playbooks/openshift-management/private/add_container_provider.yml
new file mode 100644
index 000000000..25d4058e5
--- /dev/null
+++ b/playbooks/openshift-management/private/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ import_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/openshift-management/private/config.yml
index 908679e81..22f3ee8f3 100644
--- a/playbooks/common/openshift-management/config.yml
+++ b/playbooks/openshift-management/private/config.yml
@@ -1,13 +1,15 @@
---
- name: Management Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Management install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_management: "In Progress"
- aggregate: false
+ installer_phase_management:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Setup CFME
hosts: oo_first_master
@@ -19,17 +21,19 @@
tasks:
- name: Run the CFME Setup Role
- include_role:
+ import_role:
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
- name: Management Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Management install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_management: "Complete"
- aggregate: false
+ installer_phase_management:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-management/roles b/playbooks/openshift-management/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-management/roles
+++ b/playbooks/openshift-management/private/roles
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/openshift-management/private/uninstall.yml
index 698d93405..6097ea45a 100644
--- a/playbooks/common/openshift-management/uninstall.yml
+++ b/playbooks/openshift-management/private/uninstall.yml
@@ -1,8 +1,8 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
- include_role:
+ import_role:
name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/openshift-management/roles b/playbooks/openshift-management/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/openshift-management/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/playbooks/openshift-management/uninstall.yml b/playbooks/openshift-management/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-management/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openshift-master/additional_config.yml b/playbooks/openshift-master/additional_config.yml
new file mode 100644
index 000000000..8105f7f88
--- /dev/null
+++ b/playbooks/openshift-master/additional_config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/additional_config.yml
diff --git a/playbooks/openshift-master/certificates.yml b/playbooks/openshift-master/certificates.yml
new file mode 100644
index 000000000..7ae87c09a
--- /dev/null
+++ b/playbooks/openshift-master/certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/certificates.yml
diff --git a/playbooks/openshift-master/config.yml b/playbooks/openshift-master/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-master/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index e1472ce38..85be0e600 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -1,13 +1,15 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "In Progress"
- aggregate: false
+ installer_phase_master_additional:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Additional master configuration
hosts: oo_first_master
@@ -17,10 +19,10 @@
openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
- role: openshift_examples
- when: openshift_install_examples | default(true, true) | bool
+ when: openshift_install_examples | default(true) | bool
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
@@ -28,19 +30,21 @@
when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- - openshift.common.is_atomic
- - deployment_type == 'openshift-enterprise'
+ - not openshift_is_atomic | bool
+ - openshift_deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
- role: flannel_register
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "Complete"
- aggregate: false
+ installer_phase_master_additional:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..4dbc041b0 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/openshift-master/private/certificates.yml
index f6afbc36f..d42d4402b 100644
--- a/playbooks/common/openshift-master/certificates.yml
+++ b/playbooks/openshift-master/private/certificates.yml
@@ -9,6 +9,6 @@
- role: openshift_ca
- role: openshift_master_certificates
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/openshift-master/private/config.yml
index b359919ba..153ea9993 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -1,15 +1,17 @@
---
- name: Master Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master: "In Progress"
- aggregate: false
+ installer_phase_master:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- include: certificates.yml
+- import_playbook: certificates.yml
- name: Disable excluders
hosts: oo_masters_to_config
@@ -17,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -46,7 +47,7 @@
state: absent
when:
- rpmgenerated_config.stat.exists == true
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
with_items:
- master
- node
@@ -55,9 +56,9 @@
- set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config']
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config']
| default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
roles:
- openshift_facts
@@ -149,8 +150,8 @@
hosts: oo_first_master
vars:
g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
- g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
- g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ g_session_auth_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}"
+ g_session_encryption_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}"
roles:
- role: openshift_facts
tasks:
@@ -171,24 +172,19 @@
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.ip') | default([]) | join(',')
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- - role: openshift_hosted_facts
- role: openshift_clock
- role: openshift_cloud_provider
- role: openshift_builddefaults
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- - role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
- role: openshift_master
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
@@ -207,11 +203,17 @@
- role: calico_master
when: openshift_use_calico | default(false) | bool
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: master
when: openshift_use_kuryr | default(false) | bool
+ - name: Setup the node group config maps
+ import_role:
+ name: openshift_node_group
+ when: openshift_master_bootstrap_enabled | default(false) | bool
+ run_once: True
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -220,8 +222,10 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- - include: tasks/wire_aggregator.yml
+ - include_tasks: tasks/wire_aggregator.yml
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config
@@ -229,14 +233,15 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master: "Complete"
- aggregate: false
+ installer_phase_master:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
new file mode 100644
index 000000000..c0f75ae80
--- /dev/null
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 2068ed199..9d3c12ba1 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -1,11 +1,4 @@
---
-- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
- when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
- name: Check cert expirys
hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
@@ -43,11 +36,6 @@
when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
yaml_value: ca-bundle.crt
when:
@@ -67,6 +55,13 @@
when:
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
+ # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
+ # This change will be reverted in playbooks/redeploy-certificates.yml
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: client-ca-bundle.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt'
- name: Copy current OpenShift CA to legacy directory
hosts: oo_masters_to_config
@@ -155,6 +150,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
delegate_to: "{{ openshift_ca_host }}"
run_once: true
changed_when: false
@@ -173,6 +169,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
- name: Update master client kubeconfig CA data
kubeclient_ca:
client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
@@ -210,23 +207,23 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../../openshift-master/restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -275,29 +272,29 @@
state: absent
changed_when: false
-- include: ../../openshift-node/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/openshift-master/private/restart.yml
index 4d73b8124..5cb284935 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -1,5 +1,5 @@
---
-- include: validate_restart.yml
+- import_playbook: validate_restart.yml
- name: Restart masters
hosts: oo_masters_to_config
@@ -7,13 +7,12 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: ../../../roles/openshift_master/handlers/main.yml
- static: yes
+ - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- - include: restart_hosts.yml
+ - include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
- - include: restart_services.yml
+ - include_tasks: tasks/restart_services.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/revert-client-ca.yml b/playbooks/openshift-master/private/revert-client-ca.yml
new file mode 100644
index 000000000..9ae23bf5b
--- /dev/null
+++ b/playbooks/openshift-master/private/revert-client-ca.yml
@@ -0,0 +1,17 @@
+---
+- name: Set servingInfo.clientCA = ca.crt in master config
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Read master config
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_output
+
+ # servingInfo.clientCA may be set as the client-ca-bundle.crt from
+ # CA redeployment and this task reverts that change.
+ - name: Set servingInfo.clientCA = ca.crt in master config
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/openshift-master/private/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-glusterfs/roles
+++ b/playbooks/openshift-master/private/roles
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index f4dc9df8a..007b23ea3 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,18 +20,19 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
+ # We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
- name: verify api server
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
@@ -43,14 +44,14 @@
delay: 1
changed_when: false
-- include: ../openshift-master/set_network_facts.yml
+- import_playbook: set_network_facts.yml
-- include: ../openshift-etcd/certificates.yml
+- import_playbook: ../../openshift-etcd/private/certificates.yml
-- include: ../openshift-master/config.yml
+- import_playbook: config.yml
-- include: ../openshift-loadbalancer/config.yml
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
-- include: ../openshift-node/certificates.yml
+- import_playbook: ../../openshift-node/private/certificates.yml
-- include: ../openshift-node/config.yml
+- import_playbook: ../../openshift-node/private/config.yml
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/openshift-master/private/set_network_facts.yml
index 9a6cf26fc..9a6cf26fc 100644
--- a/playbooks/common/openshift-master/set_network_facts.yml
+++ b/playbooks/openshift-master/private/set_network_facts.yml
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml
index a5dbe0590..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
new file mode 100644
index 000000000..cf2c282e3
--- /dev/null
+++ b/playbooks/openshift-master/private/tasks/restart_services.yml
@@ -0,0 +1,4 @@
+---
+- import_role:
+ name: openshift_master
+ tasks_from: restart.yml
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 560eea785..59e2b515c 100644
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -21,7 +21,7 @@
# TODO: this currently has a bug where hostnames are required
- name: Creating First Master Aggregator signer certs
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm ca create-signer-cert
--cert=/etc/origin/master/front-proxy-ca.crt
--key=/etc/origin/master/front-proxy-ca.key
--serial=/etc/origin/master/ca.serial.txt
@@ -84,7 +84,7 @@
- block:
- name: Create first master api-client config for Aggregator
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority=/etc/origin/master/front-proxy-ca.crt
--signer-cert=/etc/origin/master/front-proxy-ca.crt
--signer-key=/etc/origin/master/front-proxy-ca.key
@@ -136,9 +136,15 @@
when:
- not front_proxy_kubeconfig.stat.exists
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
+
+- name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
dest: /etc/origin/master/openshift-ansible-catalog-console.js
- name: Update master config
@@ -174,27 +180,26 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
@@ -207,9 +212,3 @@
changed_when: false
when:
- yedit_output.changed
-
-- name: Delete temp directory
- file:
- name: "{{ certtemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..fd02325ba
--- /dev/null
+++ b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
diff --git a/playbooks/common/openshift-master/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 5dbb21502..1077d0b9c 100644
--- a/playbooks/common/openshift-master/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -14,9 +14,6 @@
- role: common
local_facts:
rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
new file mode 100644
index 000000000..8b7272485
--- /dev/null
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
new file mode 100644
index 000000000..27f4e6b7d
--- /dev/null
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-master/restart.yml b/playbooks/openshift-master/restart.yml
new file mode 100644
index 000000000..041c1384d
--- /dev/null
+++ b/playbooks/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
new file mode 100644
index 000000000..f717cd0e9
--- /dev/null
+++ b/playbooks/openshift-master/scaleup.yml
@@ -0,0 +1,23 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_masters or new_nodes
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ Detected no new_masters or no new_nodes in inventory. Please
+ add hosts to the new_masters and new_nodes host groups to add
+ masters.
+ when:
+ - g_new_master_hosts | default([]) | length == 0
+ - g_new_node_hosts | default([]) | length == 0
+
+# Need a better way to do the above check for node without
+# running evaluate_groups and init/main.yml
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/scaleup.yml
diff --git a/playbooks/openshift-metrics/config.yml b/playbooks/openshift-metrics/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-metrics/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/openshift-metrics/private/config.yml
index 9c0bd489b..1e237e3f0 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -1,34 +1,39 @@
---
- name: Metrics Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "In Progress"
- aggregate: false
+ installer_phase_metrics:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- role: openshift_metrics
+# TODO: Remove when master config property is removed
- name: OpenShift Metrics
hosts: oo_masters:!oo_first_master
serial: 1
tasks:
- name: Setup the non-first masters configs
- include_role:
+ import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "Complete"
- aggregate: false
+ installer_phase_metrics:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-metrics/private/roles b/playbooks/openshift-metrics/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-metrics/private/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/openshift-nfs/config.yml b/playbooks/openshift-nfs/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-nfs/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/openshift-nfs/private/config.yml
index ce672daf5..3625efcc6 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -1,26 +1,29 @@
---
- name: NFS Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "In Progress"
- aggregate: false
+ installer_phase_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "Complete"
- aggregate: false
+ installer_phase_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/openshift-nfs/private/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-loadbalancer/roles
+++ b/playbooks/openshift-nfs/private/roles
diff --git a/playbooks/openshift-node/certificates.yml b/playbooks/openshift-node/certificates.yml
new file mode 100644
index 000000000..7ae87c09a
--- /dev/null
+++ b/playbooks/openshift-node/certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/certificates.yml
diff --git a/playbooks/openshift-node/config.yml b/playbooks/openshift-node/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-node/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-node/network_manager.yml b/playbooks/openshift-node/network_manager.yml
new file mode 100644
index 000000000..88a93952d
--- /dev/null
+++ b/playbooks/openshift-node/network_manager.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/network_manager.yml
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index ac757397b..0881121c9 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -24,7 +24,7 @@
key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
changed_when: False
-- include: etcd_client_config.yml
+- import_playbook: etcd_client_config.yml
vars:
openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
@@ -33,7 +33,6 @@
roles:
- role: flannel
etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- name: Additional node config
@@ -48,17 +47,23 @@
- role: nuage_node
when: openshift_use_nuage | default(false) | bool
-- name: Additional node config
- hosts: oo_nodes_use_contiv
+- name: Configure Contiv masters
+ hosts: oo_masters_to_config
+ roles:
+ - role: contiv
+ contiv_master: true
+ when: openshift_use_contiv | default(false) | bool
+
+- name: Configure rest of Contiv nodes
+ hosts: "{{ groups.oo_nodes_use_contiv | default([]) | difference(groups.oo_masters_to_config) }}"
roles:
- role: contiv
- contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
- name: Configure Kuryr node
hosts: oo_nodes_use_kuryr
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: node
when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/openshift-node/private/certificates-backup.yml
index 2ad84b3b9..2ad84b3b9 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+++ b/playbooks/openshift-node/private/certificates-backup.yml
diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/openshift-node/private/certificates.yml
index 908885ee6..908885ee6 100644
--- a/playbooks/common/openshift-node/certificates.yml
+++ b/playbooks/openshift-node/private/certificates.yml
diff --git a/playbooks/openshift-node/private/clean_image.yml b/playbooks/openshift-node/private/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/openshift-node/private/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/openshift-node/private/config.yml b/playbooks/openshift-node/private/config.yml
new file mode 100644
index 000000000..300a90371
--- /dev/null
+++ b/playbooks/openshift-node/private/config.yml
@@ -0,0 +1,38 @@
+---
+- name: Node Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Node install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_node:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: certificates.yml
+
+- import_playbook: setup.yml
+
+- import_playbook: containerized_nodes.yml
+
+- import_playbook: configure_nodes.yml
+
+- import_playbook: additional_config.yml
+
+- import_playbook: manage_node.yml
+
+- import_playbook: enable_excluders.yml
+
+- name: Node Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Node install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_node:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 17259422d..a13173e63 100644
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -4,14 +4,14 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
+ - role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
- role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 6fac937e3..644e6a69c 100644
--- a/playbooks/common/openshift-node/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -5,15 +5,15 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
+ - role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/common/openshift-node/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/openshift-node/private/etcd_client_config.yml
index c3fa38a81..c3fa38a81 100644
--- a/playbooks/common/openshift-node/etcd_client_config.yml
+++ b/playbooks/openshift-node/private/etcd_client_config.yml
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
new file mode 100644
index 000000000..adcbb0fdb
--- /dev/null
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -0,0 +1,26 @@
+---
+- name: normalize groups
+ import_playbook: ../../prerequisites.yml
+ vars:
+ skip_version: True
+ skip_sanity_checks: True
+ skip_validate_hostnames: True
+
+- name: run node config setup
+ import_playbook: setup.yml
+
+- name: run node config
+ import_playbook: configure_nodes.yml
+
+- name: node bootstrap config
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: bootstrap.yml
+
+- name: Re-enable excluders
+ import_playbook: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+ import_playbook: clean_image.yml
diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/openshift-node/private/manage_node.yml
index f48a19a9c..121c54a3d 100644
--- a/playbooks/common/openshift-node/manage_node.yml
+++ b/playbooks/openshift-node/private/manage_node.yml
@@ -6,6 +6,7 @@
roles:
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ openshift_manage_node_is_master: "{{ ('oo_masters_to_config' in group_names) | bool }}"
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml
index b3a7399dc..2638c5223 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/openshift-node/private/network_manager.yml
@@ -1,6 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
@@ -9,6 +7,8 @@
package:
name: 'NetworkManager'
state: present
+ register: result
+ until: result is succeeded
- name: configure NetworkManager
lineinfile:
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
new file mode 100644
index 000000000..c0f75ae80
--- /dev/null
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/openshift-node/private/restart.yml
index c3beb59b7..7249ced70 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -5,6 +5,7 @@
roles:
- lib_openshift
+ - openshift_facts
tasks:
- name: Restart docker
@@ -12,14 +13,10 @@
name: docker
state: restarted
register: l_docker_restart_docker_in_node_result
- until: not l_docker_restart_docker_in_node_result | failed
+ until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
- - name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service:
name: "{{ item }}"
@@ -27,11 +24,11 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
@@ -44,7 +41,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/common/openshift-master/roles b/playbooks/openshift-node/private/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-master/roles
+++ b/playbooks/openshift-node/private/roles
diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..802dce37e 100644
--- a/playbooks/common/openshift-node/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
@@ -22,6 +21,6 @@
when:
- hostvars[item].openshift is defined
- hostvars[item].openshift.common is defined
- - hostvars[item].openshift.common.is_containerized | bool
+ - hostvars[item].openshift_is_containerized | bool
- (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
new file mode 100644
index 000000000..8b7272485
--- /dev/null
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-node/restart.yml b/playbooks/openshift-node/restart.yml
new file mode 100644
index 000000000..041c1384d
--- /dev/null
+++ b/playbooks/openshift-node/restart.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index e0c36fb69..bdfd3d3e6 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -1,5 +1,5 @@
---
-- include: ../openshift-cluster/initialize_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Ensure there are new_nodes
hosts: localhost
@@ -14,6 +14,8 @@
when:
- g_new_node_hosts | default([]) | length == 0
-- include: ../../common/openshift-cluster/std_include.yml
+# Need a better way to do the above check for node without
+# running evaluate_groups and init/main.yml
+- import_playbook: ../init/main.yml
-- include: ../../common/openshift-node/config.yml
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-prometheus/config.yml b/playbooks/openshift-prometheus/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-prometheus/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-prometheus/private/config.yml b/playbooks/openshift-prometheus/private/config.yml
new file mode 100644
index 000000000..d13261a7a
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/config.yml
@@ -0,0 +1,29 @@
+---
+- name: Prometheus Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_prometheus:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: OpenShift Prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_prometheus:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-prometheus/private/roles b/playbooks/openshift-prometheus/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/openshift-provisioners/config.yml b/playbooks/openshift-provisioners/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-provisioners/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/openshift-provisioners/private/config.yml
index b1ca6f606..b1ca6f606 100644
--- a/playbooks/common/openshift-cluster/openshift_provisioners.yml
+++ b/playbooks/openshift-provisioners/private/config.yml
diff --git a/playbooks/openshift-provisioners/private/roles b/playbooks/openshift-provisioners/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-provisioners/private/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/openshift-service-catalog/config.yml b/playbooks/openshift-service-catalog/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-service-catalog/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/openshift-service-catalog/private/config.yml
index bd964b2ce..7bb8511f6 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/openshift-service-catalog/private/config.yml
@@ -1,13 +1,15 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "In Progress"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Service Catalog
hosts: oo_first_master
@@ -19,11 +21,13 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "Complete"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-service-catalog/private/roles b/playbooks/openshift-service-catalog/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-service-catalog/private/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-web-console/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-web-console/private/config.yml b/playbooks/openshift-web-console/private/config.yml
new file mode 100644
index 000000000..ffd702d20
--- /dev/null
+++ b/playbooks/openshift-web-console/private/config.yml
@@ -0,0 +1,31 @@
+---
+- name: Web Console Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Web Console
+ hosts: oo_first_master
+ roles:
+ - openshift_web_console
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Web Console Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/openshift-web-console/private/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-nfs/roles
+++ b/playbooks/openshift-web-console/private/roles
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
new file mode 100644
index 000000000..d361d6278
--- /dev/null
+++ b/playbooks/openstack/README.md
@@ -0,0 +1,235 @@
+# OpenStack Provisioning
+
+This directory contains [Ansible][ansible] playbooks and roles to create
+OpenStack resources (servers, networking, volumes, security groups,
+etc.). The result is an environment ready for OpenShift installation
+via [openshift-ansible].
+
+We provide everything necessary to be able to install OpenShift on
+OpenStack (including the load balancer servers when
+necessary). In addition, we work on providing integration with the
+OpenStack-native services (storage, lbaas, baremetal as a service,
+dns, etc.).
+
+
+## OpenStack Requirements
+
+Before you start the installation, you need to have an OpenStack
+environment to connect to. You can use a public cloud or an OpenStack
+within your organisation. It is also possible to
+use [Devstack][devstack] or [TripleO][tripleo]. In the case of
+TripleO, we will be running on top of the **overcloud**.
+
+The OpenStack release must be Newton (for Red Hat OpenStack this is
+version 10) or newer. It must also satisfy these requirements:
+
+* Heat (Orchestration) must be available
+* The deployment image (CentOS 7.4 or RHEL 7) must be loaded
+* The deployment flavor must be available to your user
+ - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing
+ - look at
+ the [Minimum Hardware Requirements page][hardware-requirements]
+ for production
+* The keypair for SSH must be available in openstack
+* `keystonerc` file that lets you talk to the openstack services
+ * NOTE: only Keystone V2 is currently supported
+
+Optional:
+* External Neutron network with a floating IP address pool
+
+
+
+## Installation
+
+There are three main parts to the installation:
+
+1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies)
+2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster)
+3. [Creating the OpenStack Resources and Installing OpenShift](#3-creating-the-openstack-resources-and-installing-openshift)
+
+This guide is going to install [OpenShift Origin][origin]
+with [CentOS 7][centos7] images with minimal customisation.
+
+We will create the VMs for running OpenShift in a new Neutron network and
+assign them floating IP addresses.
+
+The OpenShift cluster will have a single Master node that will run
+`etcd`, a single Infra node and two App nodes.
+
+You can look at
+the [Advanced Configuration page][advanced-configuration] for
+additional options.
+
+
+
+### 1. Preparing Ansible and dependencies
+
+First, you need to select where to run [Ansible][ansible] from (the
+*Ansible host*). This can be the computer you read this guide on or an
+OpenStack VM you'll create specifically for this purpose.
+
+We will use
+a
+[Docker image that has all the dependencies installed][control-host-image] to
+make things easier. If you don't want to use Docker, take a look at
+the [Ansible host dependencies][ansible-dependencies] and make sure
+they're installed.
+
+Your *Ansible host* needs to have the following:
+
+1. Docker
+2. `keystonerc` file with your OpenStack credentials
+3. SSH private key for logging in to your OpenShift nodes
+
+Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` in your
+current directory:
+
+```bash
+$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
+ -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
+ redhatcop/control-host-openstack bash
+```
+
+This will create the container, add your SSH key and source your
+`keystonerc`. It should be set up for the installation.
+
+You can verify that everything is in order:
+
+
+```bash
+$ less .ssh/id_rsa
+$ ansible --version
+$ openstack image list
+```
+
+
+### 2. Configuring the OpenStack Environment and OpenShift Cluster
+
+The configuration is all done in an Ansible inventory directory. We
+will clone the [openshift-ansible][openshift-ansible] repository and set
+things up for a minimal installation.
+
+
+```
+$ git clone https://github.com/openshift/openshift-ansible
+$ cp -r openshift-ansible/playbooks/openstack/sample-inventory/ inventory
+```
+
+If you're testing multiple configurations, you can have multiple
+inventories and switch between them.
+
+#### OpenStack Configuration
+
+The OpenStack configuration is in `inventory/group_vars/all.yml`.
+
+Open the file and plug in the image, flavor and network configuration
+corresponding to your OpenStack installation.
+
+```bash
+$ vi inventory/group_vars/all.yml
+```
+
+1. Set the `openshift_openstack_keypair_name` to your OpenStack keypair name.
+ - See `openstack keypair list` to find the keypairs registered with
+   OpenStack.
+ - This must correspond to your private SSH key in `~/.ssh/id_rsa`
+2. Set the `openshift_openstack_external_network_name` to the floating IP
+ network of your openstack.
+ - See `openstack network list` for the list of networks.
+ - It's often called `public`, `external` or `ext-net`.
+3. Set the `openshift_openstack_default_image_name` to the image you want your
+ OpenShift VMs to run.
+ - See `openstack image list` for the list of available images.
+4. Set the `openshift_openstack_default_flavor` to the flavor you want your
+ OpenShift VMs to use.
+ - See `openstack flavor list` for the list of available flavors.
+
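+For example, a minimal `all.yml` might end up looking like the following
+sketch (every value below is a placeholder; use the names that actually exist
+in your OpenStack environment):
+
+```yaml
+# placeholder values -- substitute your own keypair, network, image and flavor
+openshift_openstack_keypair_name: openshift
+openshift_openstack_external_network_name: public
+openshift_openstack_default_image_name: centos7
+openshift_openstack_default_flavor: m1.medium
+```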
+
+
+#### OpenShift configuration
+
+The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`.
+
+The default options will mostly work, but unless you used the large
+flavors for a production-ready environment, openshift-ansible's
+hardware check will fail.
+
+Let's disable those checks by putting this in
+`inventory/group_vars/OSEv3.yml`:
+
+```yaml
+openshift_disable_check: disk_availability,memory_availability
+```
+
+**NOTE**: The default authentication method will allow **any username
+and password** in! If you're running this in a public place, you need
+to set up access control.
+
+Feel free to look at
+the [Sample OpenShift Inventory][sample-openshift-inventory] and
+the [advanced configuration][advanced-configuration].
+
+
+### 3. Creating the OpenStack Resources and Installing OpenShift
+
+We provide an `ansible.cfg` file which has some useful defaults -- you should
+copy it to the directory you're going to run `ansible-playbook` from.
+
+```bash
+$ cp openshift-ansible/ansible.cfg ansible.cfg
+```
+
+Then run the provision + install playbook -- this will create the OpenStack
+resources:
+
+```bash
+$ ansible-playbook --user openshift -i inventory \
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
+ -e openshift_repos_enable_testing=true
+```
+
+Note, you may want to use the testing repo for development purposes only.
+Normally, `openshift_repos_enable_testing` should not be specified.
+
+If you're using multiple inventories, make sure you pass the path to
+the right one to `-i`.
+
+If your SSH private key is not in `~/.ssh/id_rsa` use the `--private-key`
+option to specify the correct path.
+
+
+
+### Next Steps
+
+And that's it! You should have a small but functional OpenShift
+cluster now.
+
+Take a look at [how to access the cluster][accessing-openshift]
+and [how to remove it][uninstall-openshift] as well as the more
+advanced configuration:
+
+* [Accessing the OpenShift cluster][accessing-openshift]
+* [Removing the OpenShift cluster][uninstall-openshift]
+* Set Up Authentication (TODO)
+* [Multiple Masters with a load balancer][loadbalancer]
+* [External DNS][external-dns]
+* Multiple Clusters (TODO)
+* [Cinder Registry][cinder-registry]
+
+
+[ansible]: https://www.ansible.com/
+[openshift-ansible]: https://github.com/openshift/openshift-ansible
+[devstack]: https://docs.openstack.org/devstack/
+[tripleo]: http://tripleo.org/
+[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
+[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
+[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
+[origin]: https://www.openshift.org/
+[centos7]: https://www.centos.org/
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
+[advanced-configuration]: ./advanced-configuration.md
+[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
+[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
+[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
+[external-dns]: ./advanced-configuration.md#dns-configuration-variables
+[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
new file mode 100644
index 000000000..2c9b70b5f
--- /dev/null
+++ b/playbooks/openstack/advanced-configuration.md
@@ -0,0 +1,641 @@
+## Dependencies for localhost (ansible control/admin node)
+
+* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
+* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
+* [jinja2](http://jinja.pocoo.org/docs/2.9/)
+* [shade](https://pypi.python.org/pypi/shade)
+* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
+* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
+* Become (sudo) is not required.
+
+**NOTE**: You can use a Docker image with all dependencies set up.
+Find more in the [Deployment section](#deployment).
+
+### Optional Dependencies for localhost
+**Note**: When using RHEL images, the `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages.
+
+* `python-openstackclient`
+* `python-heatclient`
+
+## Dependencies for OpenStack hosted cluster nodes (servers)
+
+There are no additional dependencies for the cluster nodes. Required
+configuration steps are done by Heat given a specific user data config
+that normally should not be changed.
+
+## Accessing the OpenShift Cluster
+
+### Configure DNS
+
+OpenShift requires two public DNS records to function fully. The first one points to
+the master/load balancer and provides the UI/API access. The other one is a
+wildcard domain that resolves app route requests to the infra node. A private DNS
+server and records are not required and not managed here.
+
+If you followed the default installation from the README section, there is no
+DNS configured. You should add two entries to the `/etc/hosts` file on the
+Ansible host (where you run Ansible) to do a quick validation. A real
+deployment will, however, require a DNS server with the following entries set.
+
+First, run the `openstack server list` command and note the floating IP
+addresses of the *master* and *infra* nodes (we will use `10.40.128.130` for
+master and `10.40.128.134` for infra here).
+
+Then add the following entries to your `/etc/hosts`:
+
+```
+10.40.128.130 console.openshift.example.com
+10.40.128.134 cakephp-mysql-example-test.apps.openshift.example.com
+```
+
+This points the cluster domain (as defined in the
+`openshift_master_cluster_public_hostname` Ansible variable in `OSEv3`) to the
+master node and any routes for deployed apps to the infra node.
+
+If you deploy another app, it will end up with a different URL (e.g.
+myapp-test.apps.openshift.example.com) and you will need to add that too. This
+is why a real deployment should always run a DNS server where the second entry
+will be a wildcard record for `*.apps.openshift.example.com`.
+
+This will be sufficient to validate the cluster here.
+
+Take a look at the [External DNS](#dns-configuration-variables) section for
+configuring a DNS service.
+
+
+### Get the `oc` Client
+
+**NOTE**: You can skip this section if you're using the Docker image
+-- it already has the `oc` binary.
+
+You need to download the OpenShift command line client (called `oc`).
+You can download and extract `openshift-origin-client-tools` from the
+OpenShift release page:
+
+https://github.com/openshift/origin/releases/latest/
+
+Or you can now copy it from the master node:
+
+ $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc"
+
+Either way, find the `oc` binary and put it in your `PATH`.
+
+
+### Logging in Using the Command Line
+
+
+```
+oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password
+oc new-project test
+oc new-app --template=cakephp-mysql-example
+oc status -v
+curl http://cakephp-mysql-example-test.apps.openshift.example.com
+```
+
+This will trigger an image build. You can run `oc logs -f
+bc/cakephp-mysql-example` to follow its progress.
+
+Wait until the build has finished and both pods are deployed and running:
+
+```
+$ oc status -v
+In project test on server https://master-0.openshift.example.com:8443
+
+http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example)
+ dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <-
+ bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0
+ deployment #1 deployed about a minute ago - 1 pod
+
+svc/mysql - 172.30.144.36:3306
+ dc/mysql deploys openshift/mysql:5.7
+ deployment #1 deployed 3 minutes ago - 1 pod
+
+Info:
+ * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running.
+ try: oc set probe pod/cakephp-mysql-example-1-build --liveness ...
+View details with 'oc describe <resource>/<name>' or list everything with 'oc get all'.
+
+```
+
+You can now look at the deployed app using its route:
+
+```
+$ curl http://cakephp-mysql-example-test.apps.openshift.example.com
+```
+
+Its `title` should say: "Welcome to OpenShift".
+
+
+### Accessing the UI
+
+You can also access the OpenShift cluster with a web browser by going to:
+
+https://master-0.openshift.example.com:8443
+
+Note that for this to work, the OpenShift nodes must be accessible
+from your computer and its DNS configuration must use the cluster's
+DNS.
+
+
+## Removing the OpenShift Cluster
+
+Everything in the cluster is contained within a Heat stack. To
+completely remove the cluster and all the related OpenStack resources,
+run this command:
+
+```bash
+openstack stack delete --wait --yes openshift.example.com
+```
+
+
+## DNS configuration variables
+
+Pay special attention to the values in the first paragraph -- these
+will depend on your OpenStack environment.
+
+Note that the provisioning playbooks update the original Neutron subnet
+created with the Heat stack to point to the configured DNS servers,
+so the provisioned cluster nodes will start using those natively as
+default nameservers. Technically, this allows deploying OpenShift clusters
+without dnsmasq proxies.
+
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain that all your servers will be under.
+With the default values, this will be `openshift.example.com`. For workloads,
+the default subdomain is 'apps'. That subdomain can be set as well by the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
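+For illustration only, the defaults described above correspond to inventory
+values along these lines (adjust them for your own DNS domain):
+
+    openshift_openstack_clusterid: openshift
+    openshift_openstack_public_dns_domain: example.com
+    openshift_openstack_app_subdomain: apps
+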
+If you want to use two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note** that the real hostnames (the Nova servers' or Ansible hostnames and
+inventory variables) will not be updated. The deployment may be done on
+arbitrarily named hosts with the hostnames managed by cloud-init. Inventory
+hostnames will ignore the suffixes.
+
+The `openstack_<role name>_hostname` is a set of variables used for customising
+the public names of Nova servers provisioned with a given role. When such a
+variable stays commented out, the default value (usually the role name) is used.
+
+The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all
+the created Nova servers. These will provide the internal name resolution for
+your OpenShift nodes (as well as upstream name resolution for installing
+packages, etc.).
+
+The `openshift_use_dnsmasq` variable controls whether dnsmasq is deployed.
+By default, dnsmasq is deployed and becomes the first nameserver entry in the
+hosts' `/etc/resolv.conf`, pointing to the local instance of the dnsmasq
+daemon, which in turn proxies DNS requests to the authoritative DNS server.
+When Network Manager is enabled for provisioned cluster nodes, which is
+normally the case, you should not change the defaults and always deploy dnsmasq.
+
+`openshift_openstack_external_nsupdate_keys` describes the external authoritative
+DNS server(s) processing dynamic record updates for the public-only cluster view:
+
+ openshift_openstack_external_nsupdate_keys:
+ public:
+ key_secret: <some nsupdate key>
+ key_algorithm: 'hmac-md5'
+ key_name: 'update-key'
+ server: <public DNS server IP>
+
+Here, for the public view section, we specified another key algorithm and
+optional `key_name`, which normally defaults to the cluster's DNS domain.
+This just illustrates a compatibility mode with a DNS service deployed
+by OpenShift on OSP10 reference architecture, and used in a mixed mode with
+another external DNS server.
+
+## Flannel networking
+
+In order to configure the
+[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel),
+uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars.
+Note that `osm_cluster_network_cidr` must not overlap with the default Docker
+bridge subnet of 172.17.0.0/16; otherwise, change the default docker0 CIDR
+range, for example by adding `--bip=192.168.2.1/24` to
+`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`.
+
+Also note that the flannel network will be provisioned on a separate, isolated
+Neutron subnet defined from `osm_cluster_network_cidr`, with port security disabled.
+Use the `openstack_private_data_network_name` variable to define the network
+name for the heat stack resource.
+
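+As a rough sketch, a flannel-enabled inventory might then contain something
+like the following (the CIDR and the network name are placeholders to adjust
+for your environment):
+
+    openshift_use_flannel: true
+    osm_cluster_network_cidr: 10.128.0.0/14
+    openstack_private_data_network_name: openshift-ansible-data-network
+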
+After the cluster deployment is done, you should run an additional
+post-installation step for the flannel and Docker iptables configuration:
+
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml
+
+## Other configuration variables
+
+`openshift_openstack_keypair_name` is a Nova keypair - you can see your
+keypairs with `openstack keypair list`. It must correspond to the
+private SSH key Ansible will use to log into the created VMs. This is
+`~/.ssh/id_rsa` by default, but you can use a different key by passing
+`--private-key` to `ansible-playbook`.
+
+`openshift_openstack_default_image_name` is the default name of the Glance image the
+servers will use. You can see your images with `openstack image list`.
+In order to set a different image for a role, uncomment the line with the
+corresponding variable (e.g. `openshift_openstack_lb_image_name` for load balancer) and
+set its value to another available image name. `openshift_openstack_default_image_name`
+must stay defined as it is used as a default value for the rest of the roles.
+
+`openshift_openstack_default_flavor` is the default Nova flavor the servers will use.
+You can see your flavors with `openstack flavor list`.
+In order to set a different flavor for a role, uncomment the line with the
+corresponding variable (e.g. `openshift_openstack_lb_flavor` for load balancer) and
+set its value to another available flavor. `openshift_openstack_default_flavor` must
+stay defined as it is used as a default value for the rest of the roles.
+
+`openshift_openstack_external_network_name` is the name of the Neutron network
+providing external connectivity. It is often called `public`,
+`external` or `ext-net`. You can see your networks with `openstack
+network list`.
+
+`openshift_openstack_private_network_name` is the name of the private Neutron network
+providing admin/control access for Ansible. It can be merged with other
+cluster networks; there are no special networking requirements.
+
+The `openshift_openstack_num_masters`, `openshift_openstack_num_infra` and
+`openshift_openstack_num_nodes` values specify the number of Master, Infra and
+App nodes to create.
+
+The `openshift_openstack_cluster_node_labels` defines custom labels for your openshift
+cluster node groups. It currently supports app and infra node groups.
+The default value of this variable sets `region: primary` to app nodes and
+`region: infra` to infra nodes.
+An example of setting a customised label:
+```
+openshift_openstack_cluster_node_labels:
+ app:
+ mylabel: myvalue
+```
+
+The `openshift_openstack_nodes_to_remove` variable allows you to specify the
+numerical indexes of App nodes that should be removed; for example, `['0', '2']`.
+
+The `docker_volume_size` is the default Docker volume size the servers will use.
+In order to set a different volume size for a role,
+uncomment the line with the corresponding variable (e.g. `docker_master_volume_size`
+for master) and change its value. `docker_volume_size` must stay defined as it is
+used as a default value for some of the servers (master, infra, app node).
+The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded.
+
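+For example, to keep the default size but give the masters a larger volume,
+the inventory could contain something like this (the sizes are purely
+illustrative):
+
+    docker_volume_size: "15"
+    docker_master_volume_size: "25"
+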
+**Note**: If the `openshift_openstack_ephemeral_volumes` is set to `true`, the `*_volume_size` variables
+will be ignored and the deployment will not create any cinder volumes.
+
+The `openshift_openstack_flat_secgrp` variable controls Neutron security group
+creation for Heat stacks. Set it to true if you experience issues with security
+group rule quotas. It trades security for a smaller number of rules by sharing
+the same set of firewall rules for master, node, etcd and infra nodes.
+
+The `openshift_openstack_required_packages` variable also provides a list of the
+additional prerequisite packages to be installed before deploying an OpenShift
+cluster. They are ignored, though, if `manage_packages: False` is set.
+
+## Multi-master configuration
+
+Please refer to the official documentation for the
+[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters)
+and define the corresponding [inventory
+variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables)
+in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
+under the ansible group named `ext_lb`:
+
+ openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
+ openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
+
+## Provider Network
+
+Normally, the playbooks create a new Neutron network and subnet and attach
+floating IP addresses to each node. If you have a provider network set up, this
+is all unnecessary as you can just access servers that are placed in the
+provider network directly.
+
+To use a provider network, set its name in `openshift_openstack_provider_network_name` in
+`inventory/group_vars/all.yml`.
+
+If you set the provider network name, the `openshift_openstack_external_network_name` and
+`openshift_openstack_private_network_name` fields will be ignored.
+
+**NOTE**: this will not update the nodes' DNS, so running openshift-ansible
+right after provisioning will fail (unless you're using an external DNS server
+your provider network knows about). You must make sure your nodes are able to
+resolve each other by name.
+
+## Security notes
+
+Configure required `*_ingress_cidr` variables to restrict public access
+to provisioned servers from your laptop (a /32 notation should be used)
+or your trusted network. The most important one is
+`openshift_openstack_node_ingress_cidr`, which restricts public access to the
+deployed DNS server and the cluster nodes' ephemeral port range.
+
+Note that the command ``curl https://api.ipify.org`` helps finding the external
+IP address of your box (the Ansible admin node).
+
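+For example, to only allow access from a single admin workstation, you might
+set (the address below is a documentation placeholder, substitute your own):
+
+    openshift_openstack_node_ingress_cidr: 203.0.113.10/32
+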
+There is also the `manage_packages` variable (defaults to True) you
+may want to turn off in order to speed up the provisioning tasks. This may
+be the case for development environments. When turned off, the servers will
+be provisioned omitting the ``yum update`` command. This brings security
+implications though, and is not recommended for production deployments.
+
+## Configure the OpenShift parameters
+
+Finally, you need to update the DNS entry in
+`inventory/group_vars/OSEv3.yml` (look at
+`openshift_master_default_subdomain`).
+
+In addition, this is the place where you can customise your OpenShift
+installation for example by specifying the authentication.
+
+The full list of options is available in this sample inventory:
+
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
+
+Note that, in order to deploy OpenShift Origin, you should update the following
+variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:
+
+ deployment_type: origin
+ openshift_deployment_type: "{{ deployment_type }}"
+
+
+## Setting a custom entrypoint
+
+In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname`
+
+ openshift_master_cluster_public_hostname: api.openshift.example.com
+
+Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
+you cannot set this value to simply `openshift.example.com`.
+
+## Creating and using a Cinder volume for the OpenShift registry
+
+You can optionally have the playbooks create a Cinder volume and set
+it up as the OpenShift hosted registry.
+
+To do that, you need to specify the desired Cinder volume name and size in
+gigabytes in `inventory/group_vars/all.yml`:
+
+ openshift_openstack_cinder_hosted_registry_name: cinder-registry
+ openshift_openstack_cinder_hosted_registry_size_gb: 10
+
+With this, the playbooks will create the volume and set up its
+filesystem. If there is an existing volume of the same name, we will
+use it but keep the existing data on it.
+
+To use the volume for the registry, you must first configure it with
+the OpenStack credentials by putting the following to `OSEv3.yml`:
+
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+
+This will use the credentials from your shell environment. If you want
+to enter them explicitly, you can. You can also use credentials
+different from the provisioning ones (say for quota or access control
+reasons).
+
+**NOTE**: If you're testing this on [DevStack][devstack], you must
+explicitly set your Keystone API version to v2 (e.g.
+`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default
+value provided by `openrc`. You may also encounter the following issue
+with Cinder:
+
+https://github.com/kubernetes/kubernetes/issues/50461
+
+You can read the [OpenShift documentation on configuring
+OpenStack][openstack] for more information.
+
+[devstack]: https://docs.openstack.org/devstack/latest/
+[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
+
+
+Next, we need to instruct OpenShift to use the Cinder volume for its
+registry. Again in `OSEv3.yml`:
+
+ #openshift_hosted_registry_storage_kind: openstack
+ #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+ #openshift_hosted_registry_storage_openstack_filesystem: xfs
+
+The filesystem value here will be used in the initial formatting of
+the volume.
+
+If you're using the dynamic inventory, you must uncomment these two values as
+well:
+
+ #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}"
+ #openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi"
+
+But note that they use the `os_cinder` lookup plugin we provide, so you must
+tell Ansible where to find it either in `ansible.cfg` (the one we provide is
+configured properly) or by exporting the
+`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
+variable.
+
+
+
+## Use an existing Cinder volume for the OpenShift registry
+
+You can also use a pre-existing Cinder volume for the storage of your
+OpenShift registry.
+
+To do that, you need to have a Cinder volume. You can create one by
+running:
+
+ openstack volume create --size <volume size in gb> <volume name>
+
+The volume needs to have a file system created before you put it to
+use.
+
+As with the automatically-created volume, you have to set up the
+OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as
+registry values:
+
+ #openshift_hosted_registry_storage_kind: openstack
+ #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+ #openshift_hosted_registry_storage_openstack_filesystem: xfs
+ #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
+ #openshift_hosted_registry_storage_volume_size: 10Gi
+
+Note the `openshift_hosted_registry_storage_openstack_volumeID` and
+`openshift_hosted_registry_storage_volume_size` values: these need to
+be added in addition to the previous variables.
+
+The **Cinder volume ID**, **filesystem** and **volume size** variables
+must correspond to the values in your volume. The volume ID must be
+the **UUID** of the Cinder volume, *not its name*.
+
+We can format the volume for you if you ask for it in
+`inventory/group_vars/all.yml`:
+
+ openshift_openstack_prepare_and_format_registry_volume: true
+
+**NOTE:** doing so **will destroy any data that's currently on the volume**!
+
+You can also run the registry setup playbook directly:
+
+ ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
+
+(the provisioning phase must be completed first)
+
+
+
+## Using Docker on the Ansible host
+
+If you don't want to worry about the dependencies, you can use the
+[OpenStack Control Host image][control-host-image].
+
+[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
+
+It has all the dependencies installed, but you'll need to map your
+code and credentials to it. Assuming your SSH keys live in `~/.ssh`
+and everything else is in your current directory (i.e. `ansible.cfg`,
+`keystonerc`, `inventory`, `openshift-ansible`,
+`openshift-ansible-contrib`), this is how you run the deployment:
+
+ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
+ -v $PWD:/root/openshift:Z \
+ -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
+ redhatcop/control-host-openstack bash
+
+(feel free to replace `$PWD` with an actual path to your inventory and
+checkouts, but note that relative paths don't work)
+
+The first run may take a few minutes while the image is being
+downloaded. After that, you'll be inside the container and you can run
+the playbooks:
+
+ cd openshift
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+
+## Running Custom Post-Provision Actions
+
+A custom playbook can be run like this:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml
+```
+
+If you'd like to limit the run to one particular host, you can do so as follows:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com
+```
+
+You can also create your own custom playbook. Here are a few examples:
+
+### Adding additional YUM repositories
+
+```
+---
+- hosts: app
+ tasks:
+
+  # enable EPEL
+ - name: Add repository
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+```
+
+This example runs against the app nodes. The list of available host groups includes:
+
+ - cluster_hosts (all hosts: app, infra, masters, dns, lb)
+ - OSEv3 (app, infra, masters)
+ - app
+ - dns
+ - masters
+ - infra_hosts
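+
+For example, a sketch of the same kind of playbook targeting the infra nodes
+instead of the app nodes might look like this (the repository shown is just a
+placeholder, not a real mirror):
+
+```
+---
+- hosts: infra_hosts
+  tasks:
+
+  # add an internal mirror repository
+  - name: Add repository
+    yum_repository:
+      name: internal-mirror
+      description: Internal mirror YUM repo
+      baseurl: https://mirror.example.com/repo/$releasever/$basearch/
+```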
+
+### Attaching additional RHN pools
+
+```
+---
+- hosts: cluster_hosts
+ tasks:
+ - name: Attach additional RHN pool
+ become: true
+ command: "/usr/bin/subscription-manager attach --pool=<pool ID>"
+ register: attach_rhn_pool_result
+ until: attach_rhn_pool_result.rc == 0
+ retries: 10
+ delay: 1
+```
+
+This playbook runs against all cluster nodes. To guard against transient
+connectivity problems, the task is retried up to 10 times if it initially fails.
+Note that for this example to work in your deployment, your servers must use the RHEL image.
+
+### Adding extra Docker registry URLs
+
+This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory.
+
+It adds the URLs passed as arguments to the Docker configuration file.
+In more detail, the configuration file (which is in YAML format) is loaded into an Ansible variable
+([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30)),
+and its `registries` and `insecure_registries` sections are extended with the newly added items
+([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)).
+The new content is then saved back into the original file
+([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82))
+and Docker is restarted.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}'
+```
+
+### Adding extra CAs to the trust chain
+
+This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory.
+It copies passed CAs to the trust chain location and updates the trust chain on each selected host.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [<absolute path to ca1 file>, <absolute path to ca2 file>]}'
+```
+
+Please consider contributing your custom playbook back to openshift-ansible-contrib!
+
+A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include:
+
+* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster
+* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster
+* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster
+* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-cas.yml): adds a list of CAs to the trust chain on every node in the cluster
+
+
+## Install OpenShift
+
+Once the provisioning succeeds, you can install OpenShift by running:
+
+ ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
+
+## Access UI
+
+The OpenShift UI can be accessed via the first master node's FQDN on port 8443.
+
+## Scale Deployment up/down
+
+### Scaling up
+
+You can scale up the number of application nodes by running the
+`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` playbook.
+This can be done even if there is currently no deployment available.
+The `increment_by` variable specifies how many application nodes to add
+(if no deployment exists yet, it serves as the target number of application nodes).
+The path to the `openshift-ansible` directory can be customised via the `openshift_ansible_dir`
+variable. Its value must be an absolute path to `openshift-ansible` and must not
+end with a trailing '/'.
+
+Usage:
+
+```
+ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
+```
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
new file mode 100644
index 000000000..2ab7d14a0
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -0,0 +1,15 @@
+---
+# NOTE(shadower): the AWS playbook builds an in-memory inventory of
+# all the EC2 instances here. We don't need to, since that's done by the
+# dynamic inventory.
+
+# TODO(shadower): the AWS playbook sets the
+# `openshift_master_cluster_hostname` and `osm_custom_cors_origins`
+# values here. We do it in the OSEv3 group vars. Do we need to add
+# some logic here?
+
+- name: run the cluster prerequisites
+ import_playbook: ../../prerequisites.yml
+
+- name: run the cluster deploy
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..8bb700501
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/prerequisites.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Check dependencies and OpenStack prerequisites
+ import_role:
+ name: openshift_openstack
+ tasks_from: check-prerequisites.yml
+
+ - name: Check network configuration
+ import_role:
+ name: openshift_openstack
+ tasks_from: net_vars_check.yaml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
new file mode 100644
index 000000000..a38d7bff7
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -0,0 +1,74 @@
+---
+- name: Create the OpenStack resources for cluster installation
+ hosts: localhost
+ tasks:
+ - name: provision cluster
+ import_role:
+ name: openshift_openstack
+ tasks_from: provision.yml
+
+
+# NOTE(shadower): Bring in the host groups:
+- name: evaluate groups
+ import_playbook: ../../init/evaluate_groups.yml
+
+
+- name: Wait for the nodes and gather their facts
+ hosts: oo_all_hosts
+ become: yes
+ # NOTE: The nodes may not be up yet, don't gather facts here.
+ # They'll be collected after `wait_for_connection`.
+ gather_facts: no
+ tasks:
+  - name: Wait for the nodes to come up
+ wait_for_connection:
+
+ - name: Gather facts for the new nodes
+ setup:
+
+- name: set common facts
+ import_playbook: ../../init/facts.yml
+
+
+# TODO(shadower): consider splitting this up so people can stop here
+# and configure their DNS if they have to.
+- name: Populate the DNS entries
+ hosts: localhost
+ tasks:
+ - name: Populate DNS entries
+ import_role:
+ name: openshift_openstack
+ tasks_from: populate-dns.yml
+ when:
+ - openshift_openstack_external_nsupdate_keys is defined
+ - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined
+
+- name: Prepare the Nodes in the cluster for installation
+ hosts: oo_all_hosts
+ become: yes
+ gather_facts: yes
+ tasks:
+ - name: Subscribe RHEL instances
+ import_role:
+ name: rhel_subscribe
+ when:
+ - ansible_distribution == "RedHat"
+ - rhsub_user is defined
+ - rhsub_pass is defined
+
+ - name: Enable required YUM repositories
+ import_role:
+ name: openshift_repos
+ when:
+ - ansible_distribution == "RedHat"
+ - rh_subscribed is defined
+
+ - name: Install dependencies
+ import_role:
+ name: openshift_openstack
+ tasks_from: node-packages.yml
+
+ - name: Configure Node
+ import_role:
+ name: openshift_openstack
+ tasks_from: node-configuration.yml
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..fc2854605
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -0,0 +1,9 @@
+---
+- name: Check the prerequisites for cluster provisioning in OpenStack
+ import_playbook: prerequisites.yml
+
+- name: Include the provision.yml playbook to create cluster
+ import_playbook: provision.yml
+
+- name: Include the install.yml playbook to install cluster
+ import_playbook: install.yml
diff --git a/playbooks/common/openshift-node/roles b/playbooks/openstack/openshift-cluster/roles
index e2b799b9d..e2b799b9d 120000
--- a/playbooks/common/openshift-node/roles
+++ b/playbooks/openstack/openshift-cluster/roles
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
new file mode 100644
index 000000000..481807dc9
--- /dev/null
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -0,0 +1,59 @@
+---
+## Openshift product versions and repos to install from
+openshift_deployment_type: origin
+#openshift_repos_enable_testing: true
+#openshift_deployment_type: openshift-enterprise
+#openshift_release: v3.5
+openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
+
+openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
+
+osm_default_node_selector: 'region=primary'
+
+openshift_hosted_router_wait: True
+openshift_hosted_registry_wait: True
+
+## Openstack credentials
+#openshift_cloudprovider_kind: openstack
+#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+
+
+## Use Cinder volume for Openshift registry:
+#openshift_hosted_registry_storage_kind: openstack
+#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem: xfs
+
+## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed:
+## https://github.com/openshift/openshift-ansible/issues/5657
+## If you're using the `openshift_openstack_cinder_hosted_registry_name` option from
+## `all.yml`, uncomment these lines:
+#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}"
+#openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi"
+
+## If you're using a Cinder volume you've set up yourself, uncomment these lines:
+#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
+#openshift_hosted_registry_storage_volume_size: 10Gi
+
+
+# NOTE(shadower): the hostname check seems to always fail because the
+# host's floating IP address doesn't match the address received from
+# inside the host.
+openshift_override_hostname_check: true
+
+# For POCs or demo environments that are using smaller instances than
+# the official recommended values for RAM and DISK, uncomment the line below.
+#openshift_disable_check: disk_availability,memory_availability
+
+# NOTE(shadower): Always switch to root on the OSEv3 nodes.
+# openshift-ansible requires an explicit `become`.
+ansible_become: true
+
+# # Flannel networking
+#osm_cluster_network_cidr: 10.128.0.0/14
+#openshift_use_openshift_sdn: false
+#openshift_use_flannel: true
+#flannel_interface: eth1
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
new file mode 100644
index 000000000..d63229120
--- /dev/null
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -0,0 +1,126 @@
+---
+openshift_openstack_clusterid: "openshift"
+openshift_openstack_public_dns_domain: "example.com"
+openshift_openstack_dns_nameservers: []
+
+# # Used Hostnames
+# # - set custom hostnames for roles by uncommenting corresponding lines
+#openshift_openstack_master_hostname: "master"
+#openshift_openstack_infra_hostname: "infra-node"
+#openshift_openstack_cns_hostname: "cns"
+#openshift_openstack_node_hostname: "app-node"
+#openshift_openstack_lb_hostname: "lb"
+#openshift_openstack_etcd_hostname: "etcd"
+
+openshift_openstack_keypair_name: "openshift"
+openshift_openstack_external_network_name: "public"
+#openshift_openstack_private_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-net"
+# # A dedicated Neutron network name for containers data network
+# # Configures the data network to be separated from openshift_openstack_private_network_name
+# # NOTE: this is currently only supported with the Flannel SDN
+#openstack_private_data_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-data-net"
+
+## If you want to use a provider network, set its name here.
+## NOTE: the `openshift_openstack_external_network_name` and
+## `openshift_openstack_private_network_name` options will be ignored when using a
+## provider network.
+#openshift_openstack_provider_network_name: "provider"
+
+# # Used Images
+# # - set specific images for roles by uncommenting corresponding lines
+# # - note: do not remove openshift_openstack_default_image_name definition
+#openshift_openstack_master_image_name: "centos7"
+#openshift_openstack_infra_image_name: "centos7"
+#openshift_openstack_cns_image_name: "centos7"
+#openshift_openstack_node_image_name: "centos7"
+#openshift_openstack_lb_image_name: "centos7"
+#openshift_openstack_etcd_image_name: "centos7"
+openshift_openstack_default_image_name: "centos7"
+
+openshift_openstack_num_masters: 1
+openshift_openstack_num_infra: 1
+openshift_openstack_num_cns: 0
+openshift_openstack_num_nodes: 2
+
+# # Used Flavors
+# # - set specific flavors for roles by uncommenting corresponding lines
+# # - note: do not remove openshift_openstack_default_flavor definition
+#openshift_openstack_master_flavor: "m1.medium"
+#openshift_openstack_infra_flavor: "m1.medium"
+#openshift_openstack_cns_flavor: "m1.medium"
+#openshift_openstack_node_flavor: "m1.medium"
+#openshift_openstack_lb_flavor: "m1.medium"
+#openshift_openstack_etcd_flavor: "m1.medium"
+openshift_openstack_default_flavor: "m1.medium"
+
+# # Numerical index of nodes to remove
+# openshift_openstack_nodes_to_remove: []
+
+# # Docker volume size
+# # - set specific volume size for roles by uncommenting corresponding lines
+# # - note: do not remove docker_default_volume_size definition
+#openshift_openstack_docker_master_volume_size: "15"
+#openshift_openstack_docker_infra_volume_size: "15"
+#openshift_openstack_docker_cns_volume_size: "15"
+#openshift_openstack_docker_node_volume_size: "15"
+#openshift_openstack_docker_etcd_volume_size: "2"
+#openshift_openstack_docker_lb_volume_size: "5"
+openshift_openstack_docker_volume_size: "15"
+
+## Specify server group policies for master and infra nodes. Nova must be configured to
+## enable these policies. 'anti-affinity' will ensure that each VM is launched on a
+## different physical host.
+#openshift_openstack_master_server_group_policies: [anti-affinity]
+#openshift_openstack_infra_server_group_policies: [anti-affinity]
+
+## Create a Cinder volume and use it for the OpenShift registry.
+## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml!
+#openshift_openstack_cinder_hosted_registry_name: cinder-registry
+#openshift_openstack_cinder_hosted_registry_size_gb: 10
+
+## Set up a filesystem on the Cinder volume specified in `OSEv3.yml`.
+## You need to specify the file system and volume ID in OSEv3 via
+## `openshift_hosted_registry_storage_openstack_filesystem` and
+## `openshift_hosted_registry_storage_openstack_volumeID`.
+## WARNING: This will delete any data on the volume!
+#openshift_openstack_prepare_and_format_registry_volume: False
+
+openshift_openstack_subnet_prefix: "192.168.99"
+
+## Red Hat subscription:
+#rhsub_user: '<username>'
+#rhsub_pass: '<password>'
+#rhsub_pool: '<pool name>'
+
+
+# # Roll-your-own DNS
+#openshift_openstack_external_nsupdate_keys:
+# public:
+# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg=='
+# key_algorithm: 'hmac-md5'
+# server: '192.168.1.1'
+# private:
+# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw=='
+# key_algorithm: 'hmac-md5'
+# server: '192.168.1.2'
+
+
+# NOTE(shadower): Do not change this value. The Ansible user is currently
+# hardcoded to `openshift`.
+ansible_user: openshift
+
+# # Use a single security group for a cluster (default: false)
+#openshift_openstack_flat_secgrp: false
+
+# If you want to use the VM storage instead of Cinder volumes, set this to `true`.
+# NOTE: this is for testing only! Your data will be gone once the VM disappears!
+# openshift_openstack_ephemeral_volumes: false
+
+# # OpenShift node labels
+# # - in order to customise node labels for app and/or infra group, set the
+# # openshift_openstack_cluster_node_labels variable
+#openshift_openstack_cluster_node_labels:
+# app:
+# region: primary
+# infra:
+# region: infra
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
new file mode 100755
index 000000000..45cc4e15a
--- /dev/null
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+"""
+This is an Ansible dynamic inventory for OpenStack.
+
+It requires your OpenStack credentials to be set in clouds.yaml or your shell
+environment.
+
+"""
+
+from __future__ import print_function
+
+from collections import Mapping
+import json
+
+import shade
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ inventory = {}
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ masters = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'master']
+
+ etcd = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'etcd']
+ if not etcd:
+ etcd = masters
+
+ infra_hosts = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'node' and
+ server.metadata['sub-host-type'] == 'infra']
+
+ app = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'node' and
+ server.metadata['sub-host-type'] == 'app']
+
+ cns = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'cns']
+
+ nodes = list(set(masters + infra_hosts + app + cns))
+
+ dns = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'dns']
+
+ load_balancers = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'lb']
+
+ osev3 = list(set(nodes + etcd + load_balancers))
+
+ inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
+ inventory['OSEv3'] = {'hosts': osev3}
+ inventory['masters'] = {'hosts': masters}
+ inventory['etcd'] = {'hosts': etcd}
+ inventory['nodes'] = {'hosts': nodes}
+ inventory['infra_hosts'] = {'hosts': infra_hosts}
+ inventory['app'] = {'hosts': app}
+ inventory['glusterfs'] = {'hosts': cns}
+ inventory['dns'] = {'hosts': dns}
+ inventory['lb'] = {'hosts': load_balancers}
+
+ for server in cluster_hosts:
+ if 'group' in server.metadata:
+ group = server.metadata.group
+ if group not in inventory:
+ inventory[group] = {'hosts': []}
+ inventory[group]['hosts'].append(server.name)
+
+ inventory['_meta'] = {'hostvars': {}}
+
+ for server in cluster_hosts:
+ ssh_ip_address = server.public_v4 or server.private_v4
+ hostvars = {
+ 'ansible_host': ssh_ip_address
+ }
+
+ public_v4 = server.public_v4 or server.private_v4
+ if public_v4:
+            hostvars['public_v4'] = public_v4
+            hostvars['openshift_public_ip'] = public_v4
+ # TODO(shadower): what about multiple networks?
+ if server.private_v4:
+ hostvars['private_v4'] = server.private_v4
+ # NOTE(shadower): Yes, we set both hostname and IP to the private
+ # IP address for each node. OpenStack doesn't resolve nodes by
+ # name at all, so using a hostname here would require an internal
+ # DNS which would complicate the setup and potentially introduce
+ # performance issues.
+ hostvars['openshift_ip'] = server.private_v4
+ hostvars['openshift_hostname'] = server.private_v4
+ hostvars['openshift_public_hostname'] = server.name
+
+ if server.metadata['host-type'] == 'cns':
+ hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
+
+ node_labels = server.metadata.get('node_labels')
+ # NOTE(shadower): the node_labels value must be a dict not string
+        if node_labels and not isinstance(node_labels, Mapping):
+ node_labels = json.loads(node_labels)
+
+ if node_labels:
+ hostvars['openshift_node_labels'] = node_labels
+
+ inventory['_meta']['hostvars'][server.name] = hostvars
+ return inventory
+
+
+if __name__ == '__main__':
+ print(json.dumps(build_inventory(), indent=4, sort_keys=True))
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
new file mode 100644
index 000000000..7802f83d9
--- /dev/null
+++ b/playbooks/prerequisites.yml
@@ -0,0 +1,21 @@
+---
+- import_playbook: init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: init/validate_hostnames.yml
+ when: not (skip_validate_hostnames | default(False))
+
+- import_playbook: init/repos.yml
+
+- import_playbook: init/base_packages.yml
+
+# This is required by the CRI-O container runtime and only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
+- import_playbook: container-runtime/private/setup_storage.yml
+
+- import_playbook: container-runtime/private/config.yml
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
new file mode 100644
index 000000000..4e6defd6e
--- /dev/null
+++ b/playbooks/redeploy-certificates.yml
@@ -0,0 +1,26 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
+
+- import_playbook: openshift-master/private/redeploy-certificates.yml
+
+- import_playbook: openshift-node/private/redeploy-certificates.yml
+
+- import_playbook: openshift-etcd/private/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
+
+- import_playbook: openshift-master/private/restart.yml
+
+- import_playbook: openshift-node/private/restart.yml
+
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
+ when: openshift_hosted_manage_router | default(true) | bool
+
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
+ when: openshift_hosted_manage_registry | default(true) | bool
+
+- import_playbook: openshift-master/private/revert-client-ca.yml
+
+- import_playbook: openshift-master/private/restart.yml
diff --git a/playbooks/roles b/playbooks/roles
new file mode 120000
index 000000000..d8c4472ca
--- /dev/null
+++ b/playbooks/roles
@@ -0,0 +1 @@
+../roles \ No newline at end of file