-rwxr-xr-x  .papr.sh | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  README.md | 23
-rw-r--r--  ansible.cfg | 4
-rw-r--r--  callback_plugins/aa_version_requirement.py | 2
-rw-r--r--  docs/proposals/role_decomposition.md | 2
-rw-r--r--  filter_plugins/oo_filters.py | 29
-rw-r--r--  images/installer/root/exports/manifest.json | 2
-rwxr-xr-x  images/installer/root/usr/local/bin/run | 2
-rw-r--r--  inventory/.gitignore (renamed from inventory/byo/.gitignore) | 0
-rw-r--r--  inventory/README.md | 6
-rw-r--r--  inventory/hosts.example (renamed from inventory/byo/hosts.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.external.example (renamed from inventory/byo/hosts.byo.glusterfs.external.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.mixed.example (renamed from inventory/byo/hosts.byo.glusterfs.mixed.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.native.example (renamed from inventory/byo/hosts.byo.glusterfs.native.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.glusterfs.registry-only.example) | 8
-rw-r--r--  inventory/hosts.glusterfs.storage-and-registry.example (renamed from inventory/byo/hosts.byo.glusterfs.storage-and-registry.example) | 8
-rw-r--r--  inventory/hosts.openstack (renamed from inventory/byo/hosts.openstack) | 2
-rw-r--r--  openshift-ansible.spec | 284
-rw-r--r--  playbooks/adhoc/uninstall.yml | 2
-rw-r--r--  playbooks/aws/README.md | 6
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 18
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 10
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 1
-rw-r--r--  playbooks/byo/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift_facts.yml | 5
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml) | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 57
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 77
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 93
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 36
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 107
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 108
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 121
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 121
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 123
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 113
-rw-r--r--  playbooks/container-runtime/config.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 28
l---------  playbooks/container-runtime/private/roles | 1
-rw-r--r--  playbooks/deploy_cluster.yml | 46
-rw-r--r--  playbooks/gcp/provision.yml (renamed from playbooks/gcp/openshift-cluster/provision.yml) | 7
-rw-r--r--  playbooks/init/evaluate_groups.yml | 7
-rw-r--r--  playbooks/init/repos.yml | 8
-rw-r--r--  playbooks/init/validate_hostnames.yml | 34
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/config.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 24
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 22
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml | 12
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/backup.yml) | 3
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/main.yml) | 8
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_step.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml) | 24
-rw-r--r--  playbooks/openshift-etcd/redeploy-ca.yml | 4
-rw-r--r--  playbooks/openshift-etcd/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 4
-rw-r--r--  playbooks/openshift-glusterfs/README.md | 3
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 7
-rw-r--r--  playbooks/openshift-logging/config.yml | 6
-rw-r--r--  playbooks/openshift-master/private/config.yml | 6
-rw-r--r--  playbooks/openshift-master/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 6
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 4
-rw-r--r--  playbooks/openshift-master/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-master/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/openshift-nfs/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/enable_excluders.yml | 1
-rw-r--r--  playbooks/openshift-node/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 9
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openstack/README.md | 2
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 25
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 7
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/provision_install.yml | 6
-rw-r--r--  playbooks/prerequisites.yml | 16
-rw-r--r--  playbooks/redeploy-certificates.yml | 22
-rw-r--r--  requirements.txt | 2
-rw-r--r--  roles/calico/tasks/main.yml | 1
-rw-r--r--  roles/container_runtime/README.md | 25
-rw-r--r--  roles/container_runtime/defaults/main.yml | 57
-rw-r--r--  roles/container_runtime/tasks/common/atomic_proxy.yml (renamed from roles/openshift_atomic/tasks/proxy.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 26
-rw-r--r--  roles/container_runtime/tasks/common/pre.yml | 12
-rw-r--r--  roles/container_runtime/tasks/common/setup_docker_symlink.yml | 38
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 28
-rw-r--r--  roles/container_runtime/tasks/common/udev_workaround.yml (renamed from roles/container_runtime/tasks/udev_workaround.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 27
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 67
-rw-r--r--  roles/container_runtime/tasks/main.yml | 85
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 36
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 87
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 78
-rw-r--r--  roles/container_runtime/templates/crio.conf.j2 | 2
-rw-r--r--  roles/container_runtime/templates/daemon.json | 4
-rw-r--r--  roles/contiv/meta/main.yml | 2
-rw-r--r--  roles/contiv/tasks/default_network.yml | 13
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 8
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 2
-rw-r--r--  roles/contiv/templates/aci-gw.service | 5
-rw-r--r--  roles/contiv/templates/netmaster.service | 5
-rw-r--r--  roles/contiv/templates/netplugin.service | 5
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 9
-rw-r--r--  roles/etcd/defaults/main.yaml | 2
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/flannel/defaults/main.yaml | 6
-rw-r--r--  roles/flannel/handlers/main.yml | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 4
-rw-r--r--  roles/lib_utils/library/oo_ec2_group.py | 903
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 41
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 35
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 11
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 31
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 19
-rw-r--r--  roles/openshift_aws/tasks/remove_scale_group.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 30
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/setup_scale_group_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 18
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 22
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 6
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 5
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 7
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
l---------  roles/openshift_examples/files/examples/latest | 1
l---------  roles/openshift_examples/files/examples/v3.7/v3.8 | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json | 3
l---------  roles/openshift_examples/files/examples/v3.9/latest | 1
-rw-r--r--  roles/openshift_excluder/README.md | 5
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 2
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 5
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 182
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md | 2
-rw-r--r--  roles/openshift_health_checker/defaults/main.yml | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 5
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 7
-rw-r--r--  roles/openshift_hosted/README.md | 7
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 19
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 1
-rw-r--r--  roles/openshift_management/README.md | 10
-rw-r--r--  roles/openshift_management/defaults/main.yml | 2
-rw-r--r--  roles/openshift_management/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/defaults/main.yml | 42
-rw-r--r--  roles/openshift_master/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_master/tasks/restart.yml | 4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 10
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 28
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 18
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_master/vars/main.yml | 41
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/README.md | 6
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 8
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 12
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/container_images.yml | 20
-rw-r--r--  roles/openshift_node/tasks/dnsmasq.yml | 43
-rw-r--r--  roles/openshift_node/tasks/dnsmasq_install.yml | 43
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 42
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 8
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 25
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 159
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 77
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml | 15
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 17
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 35
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 19
-rw-r--r--  roles/openshift_node/tasks/upgrade/stop_services.yml | 43
-rw-r--r--  roles/openshift_node/tasks/upgrade_pre.yml | 56
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 8
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 22
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 3
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 4
-rw-r--r--  roles/openshift_prometheus/README.md | 2
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 3
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_repos/tasks/rhel_repos.yml | 34
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 17
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 8
-rw-r--r--  roles/openshift_version/meta/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 8
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 6
-rw-r--r--  roles/rhel_subscribe/README.md | 29
-rw-r--r--  roles/rhel_subscribe/defaults/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 25
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 84
-rw-r--r--  roles/rhel_subscribe/tasks/satellite.yml | 5
-rw-r--r--  setup.py | 56
-rw-r--r-- [-rwxr-xr-x]  test/integration/build-images.sh | 0
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml | 8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml | 8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml | 7
-rw-r--r-- [-rwxr-xr-x]  test/integration/run-tests.sh | 0
-rw-r--r--  tox.ini | 3
352 files changed, 3539 insertions, 3243 deletions
diff --git a/.papr.sh b/.papr.sh
index 80453d4b2..c7b06f059 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -35,7 +35,7 @@ trap upload_journals ERR
# run the actual installer
# FIXME: override openshift_image_tag defined in the inventory until
# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
### DISABLING TESTS FOR NOW, SEE:
### https://github.com/openshift/openshift-ansible/pull/6132
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9db0b5c98..1ca23082d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.13.0 ./
+3.9.0-0.10.0 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index e52e47202..3788e9bfb 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -10,7 +10,7 @@ The table below outlines the defaults per `openshift_deployment_type`:
| openshift_deployment_type | origin | openshift-enterprise |
|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
+| **openshift_service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
diff --git a/README.md b/README.md
index cc88b60bf..fdedf2f19 100644
--- a/README.md
+++ b/README.md
@@ -16,20 +16,27 @@ tracked by
[#2720](https://github.com/openshift/openshift-ansible/issues/2720).
## Getting the correct version
+When choosing an openshift release, ensure that the necessary origin packages
+are available in your distribution's repository. By default, openshift-ansible
+will not configure extra repositories for testing or staging packages for
+end users.
-The
+We recommend using a release branch. We maintain stable branches
+corresponding to upstream Origin releases, e.g.: we guarantee an
+openshift-ansible 3.2 release will fully support an origin
+[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
+
+The most recent branch will often receive minor feature backports and
+fixes. Older branches will receive only critical fixes.
+
+In addition to the release branches, the master branch
[master branch](https://github.com/openshift/openshift-ansible/tree/master)
tracks our current work **in development** and should be compatible
with the
[Origin master branch](https://github.com/openshift/origin/tree/master)
(code in development).
-In addition to the master branch, we maintain stable branches
-corresponding to upstream Origin releases, e.g.: we guarantee an
-openshift-ansible 3.2 release will fully support an origin
-[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
-The most recent branch will often receive minor feature backports and
-fixes. Older branches will receive only critical fixes.
+
**Getting the right openshift-ansible release**
@@ -54,7 +61,7 @@ Install base dependencies:
Requirements:
-- Ansible >= 2.3.0.0
+- Ansible >= 2.4.1.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
diff --git a/ansible.cfg b/ansible.cfg
index 9900d28f8..e4d72553e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
# work around privilege escalation timeouts in ansible:
timeout = 30
-# Uncomment to use the provided BYO inventory
-#inventory = inventory/byo/hosts.example
+# Uncomment to use the provided example inventory
+#inventory = inventory/hosts.example
[inventory]
# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index 110b3d673..1093acdae 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -29,7 +29,7 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.4.0.0'
+REQUIRED_VERSION = '2.4.1.0'
DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 6434e24e7..37d080d5c 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -262,7 +262,7 @@ dependencies:
- name: "Create logging project"
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
when: not ansible_check_mode and "not found" in logging_project_result.stderr
- name: Create logging cert directory
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 45795d097..3eaf2aed5 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -375,6 +375,13 @@ def oo_split(string, separator=','):
return string.split(separator)
+def oo_list_to_dict(lst, separator='='):
+ """ This converts a list of ["k=v"] to a dictionary {k: v}.
+ """
+ kvs = [i.split(separator) for i in lst]
+ return {k: v for k, v in kvs}
+
+
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
@@ -690,26 +697,6 @@ def to_padded_yaml(data, level=0, indent=2, **kw):
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_" and translate
- legacy facts to their openshift_env counterparts.
-
- Ex: hostvars = {'openshift_fact': 42,
- 'theyre_taking_the_hobbits_to': 'isengard'}
- returns = {'openshift_fact': 42}
- '''
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- facts = {}
- regex = re.compile('^openshift_.*')
- for key in hostvars:
- if regex.match(key):
- facts[key] = hostvars[key]
-
- return facts
-
-
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
@@ -989,6 +976,7 @@ class FilterModule(object):
"oo_combine_dict": oo_combine_dict,
"oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
+ "oo_list_to_dict": oo_list_to_dict,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": oo_parse_named_certificates,
@@ -996,7 +984,6 @@ class FilterModule(object):
"oo_pretty_print_cluster": oo_pretty_print_cluster,
"oo_generate_secret": oo_generate_secret,
"oo_nodes_with_label": oo_nodes_with_label,
- "oo_openshift_env": oo_openshift_env,
"oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
"oo_pods_match_component": oo_pods_match_component,
"oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 8b984d7a3..53696b03e 100644
--- a/images/installer/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -4,7 +4,7 @@
"OPTS": "",
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
- "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
"HOME_ROOT": "/root",
"ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index cd38a6ff0..67cf7dfde 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -18,7 +18,7 @@ INVENTORY="$(mktemp)"
if [[ -v INVENTORY_FILE ]]; then
# Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
# does not attempt to modify the original
- cp -a ${INVENTORY_FILE} ${INVENTORY}
+ cp ${INVENTORY_FILE} ${INVENTORY}
elif [[ -v INVENTORY_DIR ]]; then
INVENTORY="$(mktemp -d)"
cp -R ${INVENTORY_DIR}/* ${INVENTORY}
diff --git a/inventory/byo/.gitignore b/inventory/.gitignore
index 6ff331c7e..6ff331c7e 100644
--- a/inventory/byo/.gitignore
+++ b/inventory/.gitignore
diff --git a/inventory/README.md b/inventory/README.md
index 5e26e3c32..2e348194f 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -1,5 +1 @@
-# OpenShift Ansible inventory config files
-
-You can install OpenShift on:
-
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
+# OpenShift Ansible example inventory config files
diff --git a/inventory/byo/hosts.example b/inventory/hosts.example
index e3b56d7a1..d857cd1a7 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/hosts.example
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
@@ -641,6 +641,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -650,6 +651,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -659,6 +661,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -672,6 +675,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -681,6 +685,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -690,6 +695,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
# which are destroyed when pods are deleted
@@ -1047,7 +1053,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
#openshift_management_username: admin
#openshift_management_password: smartvm
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index acf68266e..bf2557cf0 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -44,7 +44,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index a559dc377..8a20a037e 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -47,7 +47,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index ca4765c53..59acf1194 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -1,16 +1,16 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for applications. It
-# will also autmatically create a StorageClass for this purpose.
+# will also automatically create a StorageClass for this purpose.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster.
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 32040f593..6f33e9f6d 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for exclusive use
# as storage for a natively hosted Docker registry.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# This inventory may also be used with openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 9bd37cbf6..1f3a4282a 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for both general
# application use and a natively hosted Docker registry. It will also create a
# StorageClass for the general storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.openstack b/inventory/hosts.openstack
index c648078c4..d928c2b86 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7d543afdd..0d5964dda 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,14 +10,14 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.0.0%{?dist}
+Release: 0.10.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.3
+Requires: ansible >= 2.4.1
Requires: python2
Requires: python-six
Requires: tar
@@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/byo/* docs/example-inventories/
+cp inventory/* docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -285,14 +285,190 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0
+- Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com)
+- Commit to stabalize RHSM operations. This code is derived from contrib
+ (mazzystr@gmail.com)
+- Contiv systemd fixes (flamingo@2thebatcave.com)
+- Combine openshift_master/vars with defaults (mgugino@redhat.com)
+- crio: change socket path to /var/run/crio/crio.sock (gscrivan@redhat.com)
+- Remove version requirement from openvswitch package, since listed version got
+ removed from repo (riffraff@hobbes.alephone.org)
+
+* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.9.0
+- etcd: use Fedora /latest/ instead of hardcoding the version
+ (gscrivan@redhat.com)
+- docker: use Fedora /latest/ instead of hardcoding the version
+ (gscrivan@redhat.com)
+- upgrade node mark 2 (mgugino@redhat.com)
+- Refactor node upgrade to include less serial tasks (mgugino@redhat.com)
+- fix 1519808. Only annotate ops projects when openshift_logging_use_ops=true
+ (jcantril@redhat.com)
+- Ensure that clients are version bound (sdodson@redhat.com)
+- Support for making glusterfs storage class a default one.
+ (jmencak@redhat.com)
+- Add support for storage classes to openshift_prometheus role.
+ (jmencak@redhat.com)
+- Do not escalate privileges in logging stack deployment task
+ (iacopo.rozzo@amadeus.com)
+- Multimaster openshift+contiv fixes (landillo@cisco.com)
+- Sync latest image-streams and templates (alexandre.lossent@cern.ch)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.8.0
+- Remove empty openshift_hosted_facts role (mgugino@redhat.com)
+- Refactor upgrade codepaths step 1 (mgugino@redhat.com)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.7.0
+- Remove bad openshift_examples symlink (rteague@redhat.com)
+- Changing the node group format to a list. (kwoodson@redhat.com)
+- Bump RPM version requirement (sdodson@redhat.com)
+- Clarify version selection in README (mgugino@redhat.com)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.6.0
+- add openshift_master_api_port var to example inventory (jdiaz@redhat.com)
+- Allow 2 sets of hostnames for openstack provider (bdobreli@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.5.0
+- Remove unneeded embedded etcd logic (mgugino@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.4.0
+- Copying upstream fix for ansible 2.4 ec2_group module. (kwoodson@redhat.com)
+- Add missing dependencies on openshift_facts role (sdodson@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.3.0
+- remove integration tests from tox (lmeyer@redhat.com)
+- correct ansible-playbook command syntax (jdiaz@redhat.com)
+- Add openshift_facts to upgrade plays for service_type (mgugino@redhat.com)
+- Check for openshift attribute before using it during CNS install.
+ (jmencak@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.2.0
+- GlusterFS: Add playbook doc note (jarrpa@redhat.com)
+- Fix openshift hosted registry rollout (rteague@redhat.com)
+- Remove container_runtime from the openshift_version (sdodson@redhat.com)
+
+* Fri Dec 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.1.0
+- Cleanup byo references (rteague@redhat.com)
+- openshift_node: reintroduce restart of CRI-O. (gscrivan@redhat.com)
+- container-engine: skip openshift_docker_log_driver when it is False
+ (gscrivan@redhat.com)
+- container-engine: log-opts is a dictionary in the daemon.json file
+ (gscrivan@redhat.com)
+- openshift_version: add dependency to openshift_facts (gscrivan@redhat.com)
+- openshift_version: define openshift_use_crio_only (gscrivan@redhat.com)
+- openshift_version: add dependency to container_runtime (gscrivan@redhat.com)
+- crio: define and use l_is_node_system_container (gscrivan@redhat.com)
+- Update deprecation checks - include: (rteague@redhat.com)
+- Add os_firewall to prerequisites.yml (mgugino@redhat.com)
+- add 3.8 templates for gluster ep and svc (lmeyer@redhat.com)
+- Remove openshift.common.service_type (mgugino@redhat.com)
+- Remove unused openshift_env_structures and openshift_env (mgugino@redhat.com)
+- Fix incorrect register name master registry auth (mgugino@redhat.com)
+- Include Deprecation: Convert to import_playbook (rteague@redhat.com)
+- add 3.8 templates for gluster ep and svc (m.judeikis@gmail.com)
+- Remove all uses of openshift.common.admin_binary (sdodson@redhat.com)
+- Implement container_runtime playbooks and changes (mgugino@redhat.com)
+- Playbook Consolidation - byo/config.yml (rteague@redhat.com)
+- openshift_logging_kibana: fix mixing paren (lmeyer@redhat.com)
+- Fix ami building. (kwoodson@redhat.com)
+- Include Deprecation: Convert to include_tasks (rteague@redhat.com)
+- Add missing symlinks in openshift-logging (rteague@redhat.com)
+- Fix generate_pv_pvcs_list plugin undef (mgugino@redhat.com)
+- Playbook Consolidation - etcd Upgrade (rteague@redhat.com)
+- bug 1519622. Disable rollback of ES DCs (jcantril@redhat.com)
+- Remove all references to pacemaker (pcs, pcsd) and
+ openshift.master.cluster_method. (abutcher@redhat.com)
+- Remove entry point files no longer needed by CI (rteague@redhat.com)
+- Don't check for the deployment_type (tomas@sedovic.cz)
+- Get the correct value out of openshift_release (tomas@sedovic.cz)
+- Fix oreg_auth_credentials_create register var (mgugino@redhat.com)
+- Fix and cleanup not required dns bits (bdobreli@redhat.com)
+- Fix hosted vars (mgugino@redhat.com)
+- Remove duplicate init import in network_manager.yml (rteague@redhat.com)
+- Document testing repos for dev purposes (bdobreli@redhat.com)
+- Remove unused protected_facts_to_overwrite (mgugino@redhat.com)
+- Use openshift testing repos for openstack (bdobreli@redhat.com)
+- Use openshift_release instead of ose_version (tomas@sedovic.cz)
+- Remove the ose_version check (tomas@sedovic.cz)
+- Allow number of retries in openshift_management to be configurable
+ (ealfassa@redhat.com)
+- Bumping to 3.9 (smunilla@redhat.com)
+- Cleanup unused openstack provider code (bdobreli@redhat.com)
+- Adding 3.9 tito releaser (smunilla@redhat.com)
+- Implement container runtime role (mgugino@redhat.com)
+- Fix glusterfs checkpoint info (rteague@redhat.com)
+- storage_glusterfs: fix typo (lmeyer@redhat.com)
+- Playbook Consolidation - Redeploy Certificates (rteague@redhat.com)
+- Fix tox (tomas@sedovic.cz)
+- Remove shell environment lookup (tomas@sedovic.cz)
+- Revert "Fix syntax error caused by an extra paren" (tomas@sedovic.cz)
+- Revert "Fix the env lookup fallback in rhel_subscribe" (tomas@sedovic.cz)
+- Remove reading shell environment in rhel_subscribe (tomas@sedovic.cz)
+- retry package operations (lmeyer@redhat.com)
+- Add v3.9 support (sdodson@redhat.com)
+- Playbook Consolidation - openshift-logging (rteague@redhat.com)
+- Do not escalate privileges in jks generation tasks (iacopo.rozzo@amadeus.com)
+- Fix inventory symlinks in origin-ansible container. (dgoodwin@redhat.com)
+- Initial upgrade for scale groups. (kwoodson@redhat.com)
+- Update the doc text (tomas@sedovic.cz)
+- Optionally subscribe OpenStack RHEL nodes (tomas@sedovic.cz)
+- Fix the env lookup fallback in rhel_subscribe (tomas@sedovic.cz)
+- Fix syntax error caused by an extra paren (tomas@sedovic.cz)
+- Fix no_log warnings for custom module (mgugino@redhat.com)
+- Add external_svc_subnet for k8s loadbalancer type service
+ (jihoon.o@samsung.com)
+- Remove openshift_facts project_cfg_facts (mgugino@redhat.com)
+- Remove dns_port fact (mgugino@redhat.com)
+- Bug 1512793- Fix idempotence issues in ASB deploy (fabian@fabianism.us)
+- Remove unused task file from etcd role (rteague@redhat.com)
+- fix type in authroize (jchaloup@redhat.com)
+- Use IP addresses for OpenStack nodes (tomas@sedovic.cz)
+- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
+- remove schedulable from openshift_facts (mgugino@redhat.com)
+- inventory: Add example for service catalog vars (smilner@redhat.com)
+- Correct usage of include_role (rteague@redhat.com)
+- Remove openshift.common.cli_image (mgugino@redhat.com)
+- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
+- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
+- GlusterFS: Remove extraneous line from glusterblock template
+ (jarrpa@redhat.com)
+- Remove openshift_clock from meta depends (mgugino@redhat.com)
+- Simplify is_master_system_container logic (mgugino@redhat.com)
+- dist.iteritems() no longer exists in Python 3. (jpazdziora@redhat.com)
+- Remove spurrious file committed by error (diego.abelenda@camptocamp.com)
+- Fix name of the service pointed to by hostname
+ (diego.abelenda@camptocamp.com)
+- Missed the default value after the variable name change...
+ (diego.abelenda@camptocamp.com)
+- Change the name of the variable and explicitely document the names
+ (diego.abelenda@camptocamp.com)
+- Allow to set the hostname for routes to prometheus and alertmanager
+ (diego.abelenda@camptocamp.com)
+- Allow openshift_install_examples to be false (michael.fraenkel@gmail.com)
+- Include Deprecation - openshift-service-catalog (rteague@redhat.com)
+- Remove is_openvswitch_system_container from facts (mgugino@redhat.com)
+- Workaround the fact that package state=present with dnf fails for already
+ installed but excluded packages. (jpazdziora@redhat.com)
+- With dnf repoquery and excluded packages, --disableexcludes=all is needed to
+ list the package with --installed. (jpazdziora@redhat.com)
+- Add support for external glusterfs as registry backend (m.judeikis@gmail.com)
+- cri-o: honor additional and insecure registries again (gscrivan@redhat.com)
+- docker: copy Docker metadata to the alternative storage path
+ (gscrivan@redhat.com)
+- Add check for gluterFS DS to stop restarts (m.judeikis@gmail.com)
+- Bug 1514417 - Adding correct advertise-client-urls (shawn.hurley21@gmail.com)
+- Uninstall tuned-profiles-atomic-openshift-node as defined in origin.spec
+ (jmencak@redhat.com)
+- Mod startup script to publish all frontend binds (cwilkers@redhat.com)
+
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.13.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.12.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.11.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.10.0
- tox.ini: simplify unit test reqs (lmeyer@redhat.com)
@@ -341,16 +517,16 @@ Atomic OpenShift Utilities includes
- Include Deprecation - Init Playbook Paths (rteague@redhat.com)
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
- bug 1498398. Enclose content between store tag (rromerom@redhat.com)
@@ -643,10 +819,10 @@ Atomic OpenShift Utilities includes
- Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
* Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.4-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.3-1
- Adding configuration for keeping transient namespace on error.
@@ -816,10 +992,10 @@ Atomic OpenShift Utilities includes
- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
--
+-
* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
--
+-
* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
- bug 1506073. Lower cpu request for logging when it exceeds limit
@@ -849,7 +1025,7 @@ Atomic OpenShift Utilities includes
- Refactor health check playbooks (rteague@redhat.com)
* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
--
+-
* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
- Fixing documentation for the cert_key_path variable name.
@@ -923,16 +1099,16 @@ Atomic OpenShift Utilities includes
(hansmi@vshn.ch)
* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
--
+-
* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
- Use "requests" for CPU resources instead of limits
@@ -956,16 +1132,16 @@ Atomic OpenShift Utilities includes
(dymurray@redhat.com)
* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
- Change to service-signer.crt for template_service_broker CA_BUNDLE
@@ -988,7 +1164,7 @@ Atomic OpenShift Utilities includes
- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
--
+-
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
- Fix pvc selector default to be empty dict instead of string
@@ -1030,16 +1206,16 @@ Atomic OpenShift Utilities includes
(jchaloup@redhat.com)
* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
--
+-
* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
- updated dynamic provision section for openshift metrics to support storage
@@ -1448,7 +1624,7 @@ Atomic OpenShift Utilities includes
- oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
--
+-
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
- Fix ansible_syntax check (rteague@redhat.com)
@@ -1475,7 +1651,7 @@ Atomic OpenShift Utilities includes
(miciah.masters@gmail.com)
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
--
+-
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
- Update openshift_hosted_routers example to be in ini format.
@@ -1537,10 +1713,10 @@ Atomic OpenShift Utilities includes
- Add missing hostnames to registry cert (sdodson@redhat.com)
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
- openshift_version: enterprise accepts new style pre-release
@@ -1558,13 +1734,13 @@ Atomic OpenShift Utilities includes
- Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
- Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
@@ -1601,13 +1777,13 @@ Atomic OpenShift Utilities includes
(sdodson@redhat.com)
* Sat Aug 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.103.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.102.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.101.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.100.0
- Change memory requests and limits units (mak@redhat.com)
@@ -1906,13 +2082,13 @@ Atomic OpenShift Utilities includes
(kwoodson@redhat.com)
* Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
--
+-
* Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
- Config was missed before replace. (jkaur@redhat.com)
@@ -1935,7 +2111,7 @@ Atomic OpenShift Utilities includes
- GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
--
+-
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
- add scheduled pods check (jvallejo@redhat.com)
@@ -1960,7 +2136,7 @@ Atomic OpenShift Utilities includes
- updating fetch tasks to be flat paths (ewolinet@redhat.com)
* Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
--
+-
* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
- increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
@@ -2008,7 +2184,7 @@ Atomic OpenShift Utilities includes
- Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
--
+-
* Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
- etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
@@ -2020,7 +2196,7 @@ Atomic OpenShift Utilities includes
- Fixes to storage migration (sdodson@redhat.com)
* Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
--
+-
* Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
- Fix upgrade (sdodson@redhat.com)
@@ -2161,7 +2337,7 @@ Atomic OpenShift Utilities includes
- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
--
+-
* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
- Updating default from null to "" (ewolinet@redhat.com)
@@ -2205,7 +2381,7 @@ Atomic OpenShift Utilities includes
- CloudForms 4.5 templates (simaishi@redhat.com)
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
--
+-
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
- Make rollout status check best-effort, add poll (skuznets@redhat.com)
@@ -2267,7 +2443,7 @@ Atomic OpenShift Utilities includes
- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
--
+-
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
@@ -2323,7 +2499,7 @@ Atomic OpenShift Utilities includes
- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
--
+-
* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
@@ -2355,7 +2531,7 @@ Atomic OpenShift Utilities includes
loopback kubeconfigs. (abutcher@redhat.com)
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
--
+-
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
- Updating image for registry_console (ewolinet@redhat.com)
@@ -2602,13 +2778,13 @@ Atomic OpenShift Utilities includes
- Fix additional master cert & client config creation. (abutcher@redhat.com)
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
--
+-
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
- Updating logging and metrics to restart api, ha and controllers when updating
@@ -2621,10 +2797,10 @@ Atomic OpenShift Utilities includes
- Moving Dockerfile content to images dir (jupierce@redhat.com)
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
--
+-
* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
--
+-
* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
- Fix 1448368, and some other minor issues (ghuang@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ed7a7bd1a..9f044c089 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -123,7 +123,7 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-openshift-node
+ - tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
register: result
until: result | success
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 417fb539a..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster
using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
#### Step 0 (optional)
@@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
Now it is time to install Openshift using the openshift-ansible installer. This can be achieved by running the following playbook:
```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```
This playbook accomplishes the following:
1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml)
Once this playbook completes, the cluster masters should be installed and configured.
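
For context, a minimal sketch of the corrected invocation above; it assumes the `inventory.yml` and `provisioning_vars.yml` files referenced in this README sit alongside the playbooks in `playbooks/aws/openshift-cluster/`:

```
$ cd playbooks/aws/openshift-cluster
# The @ prefix with -e tells Ansible to load extra vars from a file; without -e
# the vars file would be parsed as an additional playbook path and the run fails.
$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```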
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index cab2f1e40..e7bed4f6e 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -18,7 +18,7 @@
name: lib_openshift
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
@@ -30,7 +30,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5815c4975..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -33,8 +33,8 @@
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index 1dabae357..9d9ed29de 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -1,19 +1,19 @@
---
-- include: ../../openshift-hosted/private/config.yml
+- import_playbook: ../../openshift-hosted/private/config.yml
-- include: ../../openshift-metrics/private/config.yml
+- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
-- include: ../../openshift-logging/private/config.yml
+- import_playbook: ../../openshift-logging/private/config.yml
when: openshift_logging_install_logging | default(false) | bool
-- include: ../../openshift-prometheus/private/config.yml
+- import_playbook: ../../openshift-prometheus/private/config.yml
when: openshift_hosted_prometheus_deploy | default(false) | bool
-- include: ../../openshift-service-catalog/private/config.yml
+- import_playbook: ../../openshift-service-catalog/private/config.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: ../../openshift-management/private/config.yml
+- import_playbook: ../../openshift-management/private/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index f8206529a..b03fb0b7f 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -16,31 +16,31 @@
tasks_from: master_facts.yml
- name: run the init
- include: ../../init/main.yml
+ import_playbook: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../openshift-checks/private/install.yml
+ import_playbook: ../../openshift-checks/private/install.yml
- name: etcd install
- include: ../../openshift-etcd/private/config.yml
+ import_playbook: ../../openshift-etcd/private/config.yml
- name: include nfs
- include: ../../openshift-nfs/private/config.yml
+ import_playbook: ../../openshift-nfs/private/config.yml
when: groups.oo_nfs_to_config | default([]) | count > 0
- name: include loadbalancer
- include: ../../openshift-loadbalancer/private/config.yml
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
- name: include openshift-master config
- include: ../../openshift-master/private/config.yml
+ import_playbook: ../../openshift-master/private/config.yml
- name: include master additional config
- include: ../../openshift-master/private/additional_config.yml
+ import_playbook: ../../openshift-master/private/additional_config.yml
- name: include master additional config
- include: ../../openshift-node/private/config.yml
+ import_playbook: ../../openshift-node/private/config.yml
- name: include openshift-glusterfs
- include: ../../openshift-glusterfs/private/config.yml
+ import_playbook: ../../openshift-glusterfs/private/config.yml
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index f5eb01b14..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,6 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index 78dd6a49b..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,16 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster on masters
- include: install.yml
+ import_playbook: install.yml
- name: provision the infra/compute playbook to install node resources
- include: provision_nodes.yml
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
- name: Include the hosted.yml playbook to finish the hosted configuration
- include: hosted.yml
+ import_playbook: hosted.yml
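
A hedged usage sketch for the chained playbook above, assuming the same `inventory.yml` and `provisioning_vars.yml` names as the AWS README (the file's own header comment only shows the `-i` form):

```
$ cd playbooks/aws/openshift-cluster
# provision_install.yml imports provision, install, provision_nodes, accept and
# hosted in order, so a single invocation covers the full provision-and-install flow.
$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml
```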
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
index 238a7eb2f..cf76c9d10 100644
--- a/playbooks/aws/provisioning-inventory.example.ini
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -11,6 +11,7 @@ etcd
openshift_deployment_type=origin
openshift_master_bootstrap_enabled=True
+openshift_master_api_port=443
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 7d03914a2..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: openshift-cluster/config.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
deleted file mode 100644
index 57823847b..000000000
--- a/playbooks/byo/openshift-cluster/config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index c46b22331..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
deleted file mode 100644
index a9be8dec4..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../../init/evaluate_groups.yml
-
-- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index c880fe7f7..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index aeec5f5cc..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 4664a9a2b..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index cbb89bc4d..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1adfbdec0..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b4da18281..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
index 14b0f85d4..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -4,4 +4,4 @@
#
# Upgrades scale group nodes only.
#
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index f7e5dd1d2..c4094aa7e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index cc04d81c1..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 37a9f69bb..74981cc31 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
index e8f9d94e2..a2a9d59f2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index acb4195e3..869e185af 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index df19097e1..a5867434b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 29e0ebe8d..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,13 +1,12 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
hosts: oo_all_hosts
roles:
- openshift_facts
tasks:
- - openshift_facts:
- openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
+ - openshift_facts: {}
register: result
- debug:
var: result
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 261143080..5a877809a 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/evaluate_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index a8ca5e686..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: ../../openshift-checks/private/install.yml
-
-- include: ../../openshift-etcd/private/config.yml
-
-- include: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../../openshift-master/private/config.yml
-
-- include: ../../openshift-master/private/additional_config.yml
-
-- include: ../../openshift-node/private/config.yml
-
-- include: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-hosted/private/config.yml
-
-- include: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- include: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- include: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 800621857..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,11 +1,10 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index a66301c0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..38aa9df47 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,9 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -32,6 +34,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
@@ -51,7 +54,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +62,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 3b779becb..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -11,9 +11,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 83be290e6..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -44,5 +44,5 @@
register: result
until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 2e3a7ae8b..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_installed }} --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,14 +1,14 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
vars:
openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..d5b82d9a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 6d8503879..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..2ab9f852c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,93 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin and openshift-enterprise
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+ # assuming the master-config.yml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index 6a5bc24f7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - when: openshift.common.is_containerized | bool
- block:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type }}-master"
-
- # In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
- command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
- register: master_api_service_status
-
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- when:
- - master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
- - name: Ensure Master is running
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 84b740227..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
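
The only change to this check is the move from the nested fact `openshift.common.service_type` to the flat variable `openshift_service_type`, the prefix used for package and unit names (`origin` or `atomic-openshift`). A minimal sketch of the same check as a standalone play, under the assumption that the mapping from `deployment_type` shown below matches what the repo's facts machinery provides and that the `repoquery` module ships with the `lib_utils` role:

---
# Sketch only: openshift_service_type normally comes from the repo's facts/defaults;
# the set_fact mapping below is an assumption used to keep the example self-contained.
- hosts: oo_masters_to_config
  gather_facts: no
  roles:
    - lib_utils      # assumed home of the repoquery module
  tasks:
    - name: Derive the service type from the deployment type (assumed mapping)
      set_fact:
        openshift_service_type: "{{ 'origin' if deployment_type == 'origin' else 'atomic-openshift' }}"

    - name: Check latest available OpenShift RPM version
      repoquery:
        name: "{{ openshift_service_type }}"
        ignore_excluders: true
      register: repoquery_out
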
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 503d75ba0..7b82fe05b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,22 +2,8 @@
###############################################################################
# Upgrade Masters
###############################################################################
-
-# If the facts cache was for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection,
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +16,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +57,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +68,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -275,7 +261,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -305,7 +291,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
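
The drain tasks in this file (and the matching ones in the node and scale-group playbooks below) switch from the dedicated admin binary to the generic client binary plus the `adm` subcommand, so the rendered command is now `oc adm drain ...` rather than one built on the old admin binary (historically `oadm`). A hedged sketch of roughly what the task executes on the first master, with `oc`, the default config path, and a placeholder node name standing in for the templated values:

---
# Illustration only: concrete values substituted for the templated expressions above.
- hosts: oo_first_master
  gather_facts: no
  tasks:
    - name: Drain a node for the kubelet upgrade
      command: >
        oc adm drain node-1.example.com
        --config=/etc/origin/master/admin.kubeconfig
        --force --delete-local-data --ignore-daemonsets
      register: drain_result
      until: not drain_result | failed
      retries: 60
      delay: 60
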
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..a3cb1d0f9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,25 @@
---
+- name: Prepull images and rpms before doing rolling restart
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_role:
+ name: openshift_node
+ tasks_from: upgrade_pre.yml
+ vars:
+ openshift_node_upgrade_in_progress: True
+
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+ roles:
+ - lib_openshift
+ - openshift_facts
pre_tasks:
- - name: Load lib_openshift modules
- import_role:
- name: lib_openshift
-
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -26,26 +35,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
retries: 60
delay: 60
- roles:
- - openshift_facts
post_tasks:
- include_role:
name: openshift_node
tasks_from: upgrade.yml
vars:
openshift_node_upgrade_in_progress: True
- - include_role:
- name: openshift_excluder
- vars:
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -56,3 +58,11 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
+
+- name: Re-enable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ tasks:
+ - include_role:
+ name: openshift_excluder
+ vars:
+ r_openshift_excluder_action: enable
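
The node upgrade is reorganized into three plays: a non-serialized prepull pass over `oo_nodes_to_upgrade:!oo_masters_to_config`, the serialized drain-and-upgrade play, and a final play that re-enables the excluders (now without an explicit `r_openshift_excluder_service_type`, which the role presumably defaults on its own). A condensed sketch of that parallel-then-serial structure, using the role and file names from the hunk above:

---
# Condensed sketch of the new structure; see the hunk above for the full task list.
- name: Prepull images and rpms on every node in parallel
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  roles:
    - role: openshift_facts
  tasks:
    - include_role:
        name: openshift_node
        tasks_from: upgrade_pre.yml
      vars:
        openshift_node_upgrade_in_progress: True

- name: Then drain and upgrade the nodes a few at a time
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
  tasks:
    - include_role:
        name: openshift_node
        tasks_from: upgrade.yml
      vars:
        openshift_node_upgrade_in_progress: True

- name: Finally re-enable the excluders on all nodes at once
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  tasks:
    - include_role:
        name: openshift_excluder
      vars:
        r_openshift_excluder_action: enable
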
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index d9ce3a7e3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
- include: init.yml
+ import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
register: node_unschedulable
until: node_unschedulable|succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result | failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
- name: clean up the old scale group
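
The scale-group drain also gains an optional `openshift_upgrade_nodes_drain_timeout`. The intent of the new expressions appears to be: with no timeout supplied, retry the drain once and treat a persistent failure as fatal; with a timeout supplied, pass `--timeout=<N>s`, run once, and let `failed_when` tolerate the drain timing out. Note that `default(0)` yields an integer while the comparisons are against the string `'0'`, so the defaulted branch only matches when the variable actually arrives as a string (for instance via `-e`). A hedged sketch of the same retry/failed_when shape on a dummy command:

---
# Dummy-command sketch of the retry/failed_when shape; values are placeholders.
- hosts: localhost
  gather_facts: no
  vars:
    drain_timeout: "0"          # stands in for openshift_upgrade_nodes_drain_timeout
  tasks:
    - name: Pretend to drain a node
      command: /bin/false
      register: result
      until: not result | failed
      retries: "{{ 1 if drain_timeout == '0' else 0 }}"
      delay: 5
      failed_when:
        - result | failed
        - drain_timeout == '0'
      ignore_errors: yes        # only so this standalone illustration exits cleanly
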
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5f9c56867..a5ad3801d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,104 +15,29 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
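
All of the tagged `include:` pre-upgrade plays collapse into one static `import_playbook: ../pre/config.yml`, parameterized entirely through `l_upgrade_*` host-pattern variables; playbook-level `include:` is deprecated as of Ansible 2.4 in favor of `import_playbook`, which is resolved at parse time and takes its parameters from the `vars:` block. A hypothetical sketch of how such a parameterized playbook can consume those variables — the variable names come from this hunk, but the play bodies are assumptions, not the real contents of pre/config.yml:

---
# Hypothetical shape only; the real pre/config.yml is not part of this diff.
- name: Update repos on the hosts chosen by the caller
  hosts: "{{ l_upgrade_repo_hosts }}"
  roles:
    - openshift_repos

- name: Verify upgrade targets on the hosts chosen by the caller
  hosts: "{{ l_upgrade_verify_targets_hosts }}"
  tasks:
    - include_tasks: verify_upgrade_targets.yml
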
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1aac3d014..1498db4c5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,110 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
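
The control-plane variant points at the same pre/config.yml with narrower host sets, and it now hands `init.yml` an explicit `l_upgrade_no_switch_firewall_hosts` pattern instead of operating on `oo_all_hosts`. Since these playbooks now pass host selections around as plain strings, a short reminder of the pattern syntax those strings use:

---
# ":" unions groups and ":!" excludes one; the same pattern string can be stored
# in a variable and templated into "hosts:", which is how the l_upgrade_* values work.
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ inventory_hostname }} takes part in the control-plane pre-upgrade checks"

- hosts: oo_nodes_to_config:!oo_masters_to_config
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ inventory_hostname }} is a node that is not also a master"
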
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 306b76422..6958652d8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
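
Each rewritten playbook now ends its pre-upgrade phase by setting `pre_upgrade_complete: True` on the hosts that made it through the checks, replacing the old `gate_checks.yml` include. One way a later play could consume that flag — purely illustrative, since the actual gating is not shown in this diff:

---
# Illustrative only: fail fast on any host that never received the completion flag.
- hosts: oo_nodes_to_upgrade
  gather_facts: no
  tasks:
    - name: Abort if the pre-upgrade checks did not finish on this host
      fail:
        msg: "Pre-upgrade checks did not complete on {{ inventory_hostname }}"
      when: not (pre_upgrade_complete | default(False) | bool)
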
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 6d4949542..4daa9e490 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,105 +15,26 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +42,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
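
The 3.7 path keeps the play that bounces the controllers to force the new leader election mode, now loading `openshift_facts` so that `openshift_service_type` resolves. Keeping the stop and the start as two separate tasks (rather than a single restart) matters under the default linear strategy: the stop finishes on every master before any start runs, so all controllers are down together before any come back, which is presumably what forces the election under the new mode. A sketch with a concrete unit name, assuming the service type resolves to `origin`:

---
# Assumes openshift_service_type == "origin"; adjust the unit name otherwise.
- hosts: oo_masters_to_config
  gather_facts: no
  tasks:
    - name: Stop origin-master-controllers on all masters
      systemd:
        name: origin-master-controllers
        state: stopped

    - name: Start origin-master-controllers on all masters
      systemd:
        name: origin-master-controllers
        state: started
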
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 0a592896b..1750148d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,113 +11,37 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +49,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b381d606a..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index e7d7756d1..0f74e0137 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,105 +15,26 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +42,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index be362e3ff..08bfd239f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,113 +11,37 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +49,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 6e68116b0..b5f1038fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 94c16cae0..0aea5069d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,124 +2,35 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - set_fact:
+ pre_upgrade_complete: True
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -127,6 +38,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -137,6 +50,6 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 2045f6379..05aa737c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,119 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - set_fact:
+ pre_upgrade_complete: True
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -131,6 +50,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -141,4 +62,4 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 6134f8653..1d1b255c1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,112 +4,31 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
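The version-specific upgrade playbooks above now delegate their shared pre-upgrade work to ../pre/config.yml and steer it with the l_upgrade_*_hosts variables. A hypothetical sketch of how such a shared playbook could consume those variables; the play layout below is an assumption, not the actual file contents:

    ---
    # Hypothetical sketch of ../pre/config.yml consuming the l_upgrade_* variables
    # passed in via import_playbook above.
    - name: Update repos on upgrade hosts
      hosts: "{{ l_upgrade_repo_hosts }}"
      roles:
      - openshift_repos

    - name: Verify upgrade targets
      hosts: "{{ l_upgrade_verify_targets_hosts }}"
      tasks:
      - include_tasks: verify_upgrade_targets.yml  # assumed task file name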
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
new file mode 100644
index 000000000..f15aa771f
--- /dev/null
+++ b/playbooks/container-runtime/config.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: private/config.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
new file mode 100644
index 000000000..67445edeb
--- /dev/null
+++ b/playbooks/container-runtime/private/config.yml
@@ -0,0 +1,28 @@
+---
+- hosts: "{{ l_containerized_host_groups }}"
+ vars:
+ l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: package_docker.yml
+ when:
+ - not openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_docker.yml
+ when:
+ - openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_crio.yml
+ when:
+ - openshift_use_crio | bool
+ - openshift_docker_is_node_or_master | bool
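The three include_role branches above are mutually exclusive and chosen purely by inventory toggles. A minimal sketch of the toggles involved, with illustrative values rather than project defaults:

    # Illustrative inventory values and the branch they select in the play above:
    openshift_docker_use_system_container: false   # with crio_only false -> package_docker.yml
    openshift_use_crio: true                       # -> systemcontainer_crio.yml on node/master hosts
    openshift_use_crio_only: false                 # true would skip both docker task files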
diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles
new file mode 120000
index 000000000..148b13206
--- /dev/null
+++ b/playbooks/container-runtime/private/roles
@@ -0,0 +1 @@
+../../roles/ \ No newline at end of file
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
new file mode 100644
index 000000000..0e6bde09a
--- /dev/null
+++ b/playbooks/deploy_cluster.yml
@@ -0,0 +1,46 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-checks/private/install.yml
+
+- import_playbook: openshift-etcd/private/config.yml
+
+- import_playbook: openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: openshift-master/private/config.yml
+
+- import_playbook: openshift-master/private/additional_config.yml
+
+- import_playbook: openshift-node/private/config.yml
+
+- import_playbook: openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-hosted/private/config.yml
+
+- import_playbook: openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
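Each optional component import in deploy_cluster.yml is gated by an inventory switch named in its when clause. A minimal group_vars sketch with illustrative values only:

    # group_vars/OSEv3.yml (illustrative values; names taken from the when clauses above)
    openshift_metrics_install_metrics: true
    openshift_logging_install_logging: false
    openshift_hosted_prometheus_deploy: false
    openshift_enable_service_catalog: true
    openshift_management_install_management: false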
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/provision.yml
index 097717607..6016e6a78 100644
--- a/playbooks/gcp/openshift-cluster/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -9,8 +9,5 @@
include_role:
name: openshift_gcp
-- name: run the init
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8787c87e1..8087f6ffc 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -46,14 +46,9 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
+ Running etcd as an embedded service is no longer supported.
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index a7114fc80..4967b8f13 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -3,6 +3,14 @@
hosts: oo_all_hosts
gather_facts: no
tasks:
+ - name: subscribe instances to Red Hat Subscription Manager
+ include_role:
+ name: rhel_subscribe
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - (rhel_subscription_user or rhsub_user) is defined
+ - (rhel_subscription_password or rhsub_pass) is defined
- name: initialize openshift repos
include_role:
name: openshift_repos
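The new subscription task only runs on RHEL hosts with the enterprise deployment type and with credentials supplied. A minimal sketch of the variables it checks; names come from the when conditions above, values are illustrative:

    # Illustrative inventory variables for the rhel_subscribe gate above
    deployment_type: openshift-enterprise
    rhsub_user: example-user          # alternatively rhel_subscription_user
    rhsub_pass: example-password      # alternatively rhel_subscription_password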
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index be2e6a15a..86e0b2416 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -1,6 +1,7 @@
---
- name: Validate node hostnames
hosts: oo_nodes_to_config
+ any_errors_fatal: true
tasks:
- name: Query DNS for IP address of {{ openshift.common.hostname }}
shell:
@@ -8,16 +9,35 @@
register: lookupip
changed_when: false
failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
+
+ - name: Validate openshift_hostname when defined
+ fail:
+ msg: >
The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
doesn't resolve to an IP address owned by this host. Please set
openshift_hostname variable to a hostname that when resolved on the host
- in question resolves to an IP address matching an interface on this
- host. This host will fail liveness checks for pods utilizing hostPorts,
- press ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ in question resolves to an IP address matching an interface on this host.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_hostname={{ openshift_hostname }}
+ This check can be overridden by setting openshift_hostname_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
+ - openshift_hostname_check | default(true)
+
+ - name: Validate openshift_ip exists on node when defined
+ fail:
+ msg: >
+ The IP address {{ openshift_ip }} does not exist on {{ ansible_nodename }}.
+ Please set the openshift_ip variable to an IP address of this node.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_ip={{ openshift_ip }}
+ This check can be overridden by setting openshift_ip_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+ when:
+ - openshift_ip is defined
+ - openshift_ip not in ansible_all_ipv4_addresses
+ - openshift_ip_check | default(true)
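Both new validations fail hard instead of pausing, and each can be disabled with the override named in its message. A minimal host-level sketch with illustrative values:

    # Illustrative host variables exercised by the checks above
    openshift_hostname: node1.example.com
    openshift_ip: 192.0.2.10
    # Opt-outs referenced in the failure messages:
    openshift_hostname_check: false
    openshift_ip_check: false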
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index c9f186e72..f3bb3c2d1 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -7,7 +7,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: ca
+ tasks_from: ca.yml
vars:
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index d738c8207..ce21a1f96 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -5,10 +5,10 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_generated_certificates
+ tasks_from: backup_generated_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_generated_certificates
+ tasks_from: remove_generated_certificates.yml
- name: Backup deployed etcd certificates
hosts: oo_etcd_to_config
@@ -16,4 +16,4 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_server_certificates
+ tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 3d6c79834..35407969e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -19,7 +19,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index 514319b88..be177b714 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -20,9 +20,9 @@
- name: Check the master API is ready
include_role:
name: openshift_master
- tasks_from: check_master_api_is_ready
+ tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
@@ -34,7 +34,7 @@
# Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -42,7 +42,7 @@
- include_role:
name: etcd
- tasks_from: backup.archive
+ tasks_from: backup.archive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -58,7 +58,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_master_etcd_certificates
+ tasks_from: backup_master_etcd_certificates.yml
- name: Redeploy master etcd certificates
import_playbook: master_etcd_certificates.yml
@@ -75,10 +75,10 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
# 6. copy the embedded etcd backup to the external host
# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
@@ -93,7 +93,7 @@
- include_role:
name: etcd
- tasks_from: backup.fetch
+ tasks_from: backup.fetch.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -103,7 +103,7 @@
- include_role:
name: etcd
- tasks_from: backup.copy
+ tasks_from: backup.copy.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -124,14 +124,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup.unarchive
+ tasks_from: backup.unarchive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- include_role:
name: etcd
- tasks_from: backup.force_new_cluster
+ tasks_from: backup.force_new_cluster.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
@@ -145,7 +145,7 @@
tasks:
- include_role:
name: openshift_master
- tasks_from: configure_external_etcd
+ tasks_from: configure_external_etcd.yml
vars:
etcd_peer_url_scheme: "https"
etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 4269918c2..313ed8bec 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -17,9 +17,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.pre_check
+ tasks_from: migrate.pre_check.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
@@ -28,8 +27,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
@@ -46,10 +45,9 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -74,7 +72,7 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
@@ -82,9 +80,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate
+ tasks_from: migrate.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -95,9 +92,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -132,7 +128,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.add_ttls
+ tasks_from: migrate.add_ttls.yml
vars:
etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
etcd_url_scheme: "https"
@@ -144,7 +140,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.configure_master
+ tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index cc5d57031..158bcb849 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -16,12 +16,12 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_ca_certificates
+ tasks_from: backup_ca_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_ca_certificates
+ tasks_from: remove_ca_certificates.yml
-- include: ca.yml
+- import_playbook: ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -44,7 +44,7 @@
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
when: ('expired' not in (hostvars
| oo_select_keys(groups['etcd'])
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: retrieve_ca_certificates
+ tasks_from: retrieve_ca_certificates.yml
vars:
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
@@ -82,7 +82,7 @@
state: absent
changed_when: false
-- include: ../../openshift-master/private/restart.yml
+- import_playbook: ../../openshift-master/private/restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
diff --git a/playbooks/openshift-etcd/private/redeploy-certificates.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml
index cc1e6adf5..1c8eb27ac 100644
--- a/playbooks/openshift-etcd/private/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml
@@ -11,8 +11,8 @@
# certificates were previously expired.
- role: openshift_certificate_expiry
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
etcd_certificates_redeploy: true
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index fac8e3f02..3ef043ec8 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -32,7 +32,7 @@
until: etcd_add_check.rc == 0
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
@@ -78,4 +78,4 @@
post_tasks:
- include_role:
name: openshift_master
- tasks_from: update_etcd_client_urls
+ tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 14c74baf3..695b53990 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -7,7 +7,7 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 531175c85..7dfea07f1 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -6,10 +6,9 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index 6fca42bd0..c133c0201 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 5b8ba3bb2..e373a4a4c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -6,7 +6,7 @@
# available in the repos. So for Fedora we'll simply skip this, sorry.
- name: Backup etcd before upgrading anything
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
@@ -16,14 +16,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: drop_etcdctl
+ tasks_from: drop_etcdctl.yml
- name: Perform etcd upgrade
- include: ./upgrade.yml
+ import_playbook: upgrade_step.yml
when: openshift_etcd_upgrade | default(true) | bool
- name: Backup etcd
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 51e8786b3..902c39d9c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_rpm
+ tasks_from: upgrade_rpm.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index c5ff4133c..60127fc68 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -6,47 +6,47 @@
name: etcd
tasks_from: version_detect.yml
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.1'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.2.5'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.3'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.3.7'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.0'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.0.15'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.1'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.1.3'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.2.7'
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
etcd_peer: "{{ openshift.common.hostname }}"
when:
diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml
index b1d23675d..769d694ba 100644
--- a/playbooks/openshift-etcd/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/redeploy-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-ca.yml
+- import_playbook: private/redeploy-ca.yml
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
index 1bd302c03..753878d70 100644
--- a/playbooks/openshift-etcd/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -1,10 +1,10 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: ../openshift-master/private/restart.yml
+- import_playbook: ../openshift-master/private/restart.yml
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
new file mode 100644
index 000000000..ccc797527
--- /dev/null
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..107bbfff6 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
devices but you must specify the following variables in `[OSEv3:vars]`:
* `openshift_storage_glusterfs_is_missing=False`
* `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+ in the `nodes` group. They must also already be configured as OpenShift
+ nodes before this playbook runs.
By default, pods for a native GlusterFS cluster will be created in the
`default` namespace. To change this, specify
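To illustrate the requirement added above, a minimal inventory sketch in YAML form (the repository's shipped examples use INI; host names are placeholders) with the native GlusterFS hosts listed in both groups:

    all:
      children:
        nodes:
          hosts:
            node1.example.com:
            node2.example.com:
            node3.example.com:
        glusterfs:
          hosts:
            node1.example.com:
            node2.example.com:
            node3.example.com: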
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
index 65fb0abda..518a1d624 100644
--- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-registry-certificates.yml
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
index 8dc052751..a74dd8c79 100644
--- a/playbooks/openshift-hosted/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-router-certificates.yml
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 78fe663db..2636d857e 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,13 +11,6 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- name: Configure firewall load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
-
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index 8837a2d32..83d330284 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -1,9 +1,9 @@
---
#
# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
+# Hosted logging on. See inventory/hosts.example for the
# currently supported method.
#
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/config.yml
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index afb8d6bd1..15d301ddb 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -19,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -180,9 +179,7 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- - role: openshift_hosted_facts
- role: openshift_clock
- role: openshift_cloud_provider
- role: openshift_builddefaults
@@ -228,6 +225,8 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- include_tasks: tasks/wire_aggregator.yml
@@ -237,7 +236,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-master/private/redeploy-certificates.yml
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 59657574a..9f5502141 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -56,7 +56,7 @@
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
# Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ # This change will be reverted in playbooks/redeploy-certificates.yml
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
@@ -207,7 +207,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
@@ -272,7 +272,7 @@
state: absent
changed_when: false
-- include: ../../openshift-node/private/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 8229eccfa..007b23ea3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,11 +20,11 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index ecf8f15d9..4f55d5c82 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -180,13 +180,13 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-master/redeploy-certificates.yml
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
index 3ae74c7a0..27f4e6b7d 100644
--- a/playbooks/openshift-master/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-openshift-ca.yml
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml
index 6ea77e00b..3625efcc6 100644
--- a/playbooks/openshift-nfs/private/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -14,7 +14,6 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index 261e2048f..b86cb3cc2 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -33,7 +33,6 @@
roles:
- role: flannel
etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- name: Additional node config
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index dc5d7a57e..32b288c8b 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -10,7 +10,6 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
- role: tuned
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 5afa83be7..ef07669cb 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -12,7 +12,6 @@
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/openshift-node/private/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-node/private/redeploy-certificates.yml
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 41eb00f99..6a10585b9 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -5,6 +5,7 @@
roles:
- lib_openshift
+ - openshift_facts
tasks:
- name: Restart docker
@@ -23,9 +24,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -40,7 +41,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..541913aef 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f567242cd..d361d6278 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -226,7 +226,7 @@ advanced configuration:
[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
[origin]: https://www.openshift.org/
[centos7]: https://www.centos.org/
-[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
[advanced-configuration]: ./advanced-configuration.md
[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index db2a13d38..2c9b70b5f 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -159,11 +159,22 @@ So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows to deploy OpenShift clusters
without dnsmasq proxies.
-The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`. For workloads, the default subdomain is 'apps'.
-That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in
-the inventory.
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain that all your servers will be under.
+With the default values, this will be `openshift.example.com`. For workloads,
+the default subdomain is 'apps'. That subdomain can also be set by the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
+If you want to use two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note** that the real hostnames (the Nova servers' or Ansible hostnames) and
+inventory variables will not be updated. The deployment may be done on
+arbitrarily named hosts with the hostnames managed by cloud-init. Inventory
+hostnames will ignore the suffixes.
The `openstack_<role name>_hostname` is a set of variables used for customising
public names of Nova servers provisioned with a given role. When such a variable stays commented,
@@ -343,7 +354,7 @@ installation for example by specifying the authentication.
The full list of options is available in this sample inventory:
-https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
Note, that in order to deploy OpenShift origin, you should update the following
variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`:
@@ -604,7 +615,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/
Once it succeeds, you can install openshift by running:
- ansible-playbook openshift-ansible/playbooks/byo/config.yml
+ ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
## Access UI
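As a worked example of the DNS variables described above, a group_vars sketch with illustrative values:

    # inventory/group_vars/all.yml (illustrative values)
    openshift_openstack_clusterid: openshift
    openshift_openstack_public_dns_domain: example.com    # cluster domain: openshift.example.com
    openshift_openstack_app_subdomain: apps
    # Optional suffixes, only added to the nsupdate records (empty by default):
    openshift_openstack_public_hostname_suffix: ""
    openshift_openstack_private_hostname_suffix: "-private"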
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 1c4f609e3..3211f619a 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -8,8 +8,5 @@
# values here. We do it in the OSEv3 group vars. Do we need to add
# some logic here?
-- name: run the initialization
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 3e295b2c8..583e72b51 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -10,7 +10,7 @@
# NOTE(shadower): Bring in the host groups:
- name: evaluate groups
- include: ../../init/evaluate_groups.yml
+ import_playbook: ../../init/evaluate_groups.yml
- name: Wait for the nodes and gather their facts
@@ -27,7 +27,7 @@
setup:
- name: set common facts
- include: ../../init/facts.yml
+ import_playbook: ../../init/facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
index 5d88c105f..fc2854605 100644
--- a/playbooks/openstack/openshift-cluster/provision_install.yml
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -1,9 +1,9 @@
---
- name: Check the prerequisites for cluster provisioning in OpenStack
- include: prerequisites.yml
+ import_playbook: prerequisites.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster
- include: install.yml
+ import_playbook: install.yml
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7dd59c5d8..7b7868cfe 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,12 +1,12 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
vars:
skip_verison: True
-- hosts: "{{ l_containerized_host_groups }}"
- vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
- l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
- tasks:
- - include_role:
- name: container_runtime
+# The firewall setup here is required by the container runtime (CRI-O) and only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
+- import_playbook: container-runtime/private/config.yml
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
index 45135c10e..b5fcb951d 100644
--- a/playbooks/redeploy-certificates.yml
+++ b/playbooks/redeploy-certificates.yml
@@ -1,26 +1,26 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
-- include: openshift-etcd/private/redeploy-certificates.yml
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
-- include: openshift-master/private/redeploy-certificates.yml
+- import_playbook: openshift-master/private/redeploy-certificates.yml
-- include: openshift-node/private/redeploy-certificates.yml
+- import_playbook: openshift-node/private/redeploy-certificates.yml
-- include: openshift-etcd/private/restart.yml
+- import_playbook: openshift-etcd/private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
-- include: openshift-node/private/restart.yml
+- import_playbook: openshift-node/private/restart.yml
-- include: openshift-hosted/private/redeploy-router-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
when: openshift_hosted_manage_router | default(true) | bool
-- include: openshift-hosted/private/redeploy-registry-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
when: openshift_hosted_manage_registry | default(true) | bool
-- include: openshift-master/private/revert-client-ca.yml
+- import_playbook: openshift-master/private/revert-client-ca.yml
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
diff --git a/requirements.txt b/requirements.txt
index be1bde18e..67cdaaff5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
# Versions are pinned to prevent pypi releases arbitrarily breaking
# tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.4.0.0
+ansible==2.4.1.0
boto==2.34.0
click==6.7
pyOpenSSL==16.2.0
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 0e3863304..bbc6edd48 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -14,7 +14,6 @@
vars:
etcd_cert_prefix: calico.etcd-
etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index e363c1714..51f469aaf 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -1,18 +1,23 @@
-Docker
+Container Runtime
=========
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-Requirements
-------------
+This role is designed to be used with include_role and tasks_from.
-Ansible 2.2
+Entry points
+------------
+* package_docker.yml - install and set up the docker container runtime.
+* systemcontainer_docker.yml - utilize docker + systemcontainer
+* systemcontainer_crio.yml - utilize crio + systemcontainer
+* registry_auth.yml - place docker login credentials.
-Mandator Role Variables
---------------
+Requirements
+------------
+Ansible 2.4
Dependencies
@@ -24,9 +29,9 @@ Example Playbook
----------------
- hosts: servers
- roles:
- - role: container_runtime
- docker_udev_workaround: "true"
+  tasks:
+  - include_role:
+      name: container_runtime
+      tasks_from: package_docker.yml
License
-------
@@ -36,4 +41,4 @@ ASL 2.0
Author Information
------------------
-OpenShift operations, Red Hat, Inc
+Red Hat, Inc
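Following the entry-point pattern the README now documents, a minimal sketch of calling the CRI-O task file directly, mirroring the conditions used in playbooks/container-runtime/private/config.yml earlier in this diff; the host pattern is illustrative:

    - hosts: nodes
      roles:
      - role: container_runtime        # brings role default variables into play scope
      tasks:
      - include_role:
          name: container_runtime
          tasks_from: systemcontainer_crio.yml
        when: openshift_use_crio | bool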
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 62b3e141a..dd185cb38 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -59,6 +59,7 @@ docker_default_storage_path: /var/lib/docker
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
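A note on the new l_docker_log_options_dict default above: the daemon.json template later in this diff switches log-opts from the plain list to this dict form, since dockerd expects log-opts to be a JSON object rather than an array. Assuming the oo_list_to_dict filter turns "key=value" list entries into a mapping (an assumption based on its name and how it is used here), the transformation would look roughly like this:

    # illustrative only: presumed behaviour of the oo_list_to_dict filter
    l2_docker_log_options:
      - "max-size=50m"
      - "max-file=3"
    # l_docker_log_options      -> ["max-size=50m", "max-file=3"]
    # l_docker_log_options_dict -> {"max-size": "50m", "max-file": "3"}
    # daemon.json then renders:    "log-opts": {"max-size": "50m", "max-file": "3"}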
@@ -71,10 +72,62 @@ docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}"
openshift_use_crio: False
openshift_use_crio_only: False
+l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}"
+l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+# --------------------- #
+# systemcontainers_crio #
+# --------------------- #
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-l_openshift_image_tag_default: "{{ openshift_release }}"
-l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+
+openshift_crio_image_tag_default: "latest"
+
+l_crt_crio_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
+
+l_crt_crio_image_prepend_dict:
+ openshift-enterprise: "registry.access.redhat.com/openshift3"
+ origin: "docker.io/gscrivano"
+
+l_crt_crio_image_dict:
+ Fedora:
+ crio_image_name: "cri-o-fedora"
+ crio_image_tag: "latest"
+ CentOS:
+ crio_image_name: "cri-o-centos"
+ crio_image_tag: "latest"
+ RedHat:
+ crio_image_name: "cri-o"
+ crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
+
+l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_tag'] }}"
+
+l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
+
+# ----------------------- #
+# systemcontainers_docker #
+# ----------------------- #
+l_crt_docker_image_prepend_dict:
+ Fedora: "registry.fedoraproject.org/latest"
+  CentOS: "docker.io/gscrivano"
+ RedHat: "registry.access.redhat.com/openshift3"
+
+openshift_docker_image_tag_default: "latest"
+l_crt_docker_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
+
+l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
+
+l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
+
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
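As a worked example of how the image defaults above compose (illustrative only, not part of the diff): on a CentOS host with openshift_deployment_type=origin and no overrides set, and assuming openshift_docker_service_name resolves to container-engine for the docker system-container case, the lookups land on roughly:

    # illustrative resolution of the defaults above
    l_crio_image_prepend: "docker.io/gscrivano"
    l_crio_image_name: "cri-o-centos"
    l_crio_image_tag: "latest"
    l_crio_image: "docker.io/gscrivano/cri-o-centos:latest"
    l_docker_image: "docker.io/gscrivano/container-engine:latest"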
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml
index dde099984..dde099984 100644
--- a/roles/openshift_atomic/tasks/proxy.yml
+++ b/roles/container_runtime/tasks/common/atomic_proxy.yml
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
new file mode 100644
index 000000000..d790eb2c0
--- /dev/null
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
+- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+# registry_auth is called directly with include_role in some places, so we
+# have to put it in the root of the tasks/ directory.
+- include_tasks: ../registry_auth.yml
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- include_tasks: setup_docker_symlink.yml
+ when:
+ - openshift_use_crio
+ - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml
new file mode 100644
index 000000000..990fe66da
--- /dev/null
+++ b/roles/container_runtime/tasks/common/pre.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: udev_workaround.yml
+ when: docker_udev_workaround | default(False) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not openshift_use_crio_only | bool
diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
new file mode 100644
index 000000000..d7aeb192e
--- /dev/null
+++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
@@ -0,0 +1,38 @@
+---
+- block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift_docker_service_name }}"
+
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift_docker_service_name }}"
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
new file mode 100644
index 000000000..715ed492d
--- /dev/null
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/container_runtime/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml
index 257c3123d..257c3123d 100644
--- a/roles/container_runtime/tasks/udev_workaround.yml
+++ b/roles/container_runtime/tasks/common/udev_workaround.yml
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
new file mode 100644
index 000000000..e62cf5505
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -0,0 +1,27 @@
+---
+# Sanity checks to ensure the role will complete and provide helpful error
+# messages for common problems.
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
new file mode 100644
index 000000000..f29619f42
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -0,0 +1,67 @@
+---
+
+# This snippet determines whether a Docker upgrade is required by checking the
+# inventory variables and the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ args:
+ warn: no
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
+ register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
+ # Don't expect docker rpm to be available on hosts that don't already have it installed:
+ when: pkg_check.rc == 0
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ # Disable the 1.12 requirement if the user set a specific Docker version
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
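A playbook would typically pull this check in with include_role and tasks_from and then branch on l_docker_upgrade; the sketch below is illustrative only, and the oo_nodes_to_upgrade host group is an assumption rather than something introduced by this diff:

    - hosts: oo_nodes_to_upgrade
      tasks:
        - name: Determine whether Docker needs to be upgraded (illustrative)
          include_role:
            name: container_runtime
            tasks_from: docker_upgrade_check.yml
        - debug:
            msg: "Docker upgrade required: {{ l_docker_upgrade | default(false) }}"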
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 6d68082b1..96d8606c6 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,85 +1,2 @@
---
-- include_tasks: udev_workaround.yml
- when: docker_udev_workaround | default(False) | bool
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
- - not openshift_use_crio_only | bool
-
-- name: Use Package Docker if Requested
- include_tasks: package_docker.yml
- when:
- - not openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Ensure /var/lib/containers exists
- file:
- path: /var/lib/containers
- state: directory
-
-- name: Fix SELinux Permissions on /var/lib/containers
- command: "restorecon -R /var/lib/containers/"
- changed_when: false
-
-- name: Use System Container Docker if Requested
- include_tasks: systemcontainer_docker.yml
- when:
- - openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Add CRI-O usage Requested
- include_tasks: systemcontainer_crio.yml
- when:
- - openshift_use_crio
- - openshift_docker_is_node_or_master | bool
-
-- name: stat the docker data dir
- stat:
- path: "{{ docker_default_storage_path }}"
- register: dockerstat
-
-- when:
- - openshift_use_crio
- - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
- block:
- - name: stop the current running docker
- systemd:
- state: stopped
- name: "{{ openshift_docker_service_name }}"
-
- - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
- command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc != 0
-
- - name: "Set the selinux context on {{ docker_alt_storage_path }}"
- command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc == 1
- - "'already exists' not in results.stderr"
-
- - name: "restorecon the {{ docker_alt_storage_path }}"
- command: "restorecon -r {{ docker_alt_storage_path }}"
-
- - name: Remove the old docker location
- file:
- state: absent
- path: "{{ docker_default_storage_path }}"
-
- - name: Setup the link
- file:
- state: link
- src: "{{ docker_alt_storage_path }}"
- path: "{{ docker_default_storage_path }}"
-
- - name: start docker
- systemd:
- state: started
- name: "{{ openshift_docker_service_name }}"
+# This role is meant to be used with include_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 40ab75a25..89899c9cf 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: common/pre.yml
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
@@ -7,35 +9,16 @@
until: curr_docker_version | succeeded
changed_when: false
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+# Some basic checks to ensure the role will complete
+- include_tasks: docker_sanity.yml
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ package:
+ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
+ state: present
when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
register: result
until: result | success
@@ -161,7 +144,4 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- meta: flush_handlers
-
-# This needs to run after docker is restarted to account for proxy settings.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 48e33f0aa..61f122f3c 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -1,39 +1,14 @@
---
# TODO: Much of this file is shared with container engine tasks
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+ - not l_is_node_system_container | bool
+
+- include_tasks: common/pre.yml
+- include_tasks: common/syscontainer_packages.yml
- name: Check that overlay is in the kernel
shell: lsmod | grep overlay
@@ -60,50 +35,11 @@
state: restarted
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set CRI-O image defaults
- set_fact:
- l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "cri-o-fedora"
- l_crio_image_tag: "latest"
-
- - name: Use Centos based image when distribution is CentOS
- set_fact:
- l_crio_image_name: "cri-o-centos"
- when: ansible_distribution == "CentOS"
-
- - name: Set CRI-O image tag
- set_fact:
- l_crio_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use RHEL based image when distribution is Red Hat
- set_fact:
- l_crio_image_prepend: "registry.access.redhat.com/openshift3"
- l_crio_image_name: "cri-o"
- when: ansible_distribution == "RedHat"
-
- - name: Set the full image name
- set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
-
- # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- - name: Use a specific image if requested
- set_fact:
- l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
- when:
- - openshift_crio_systemcontainer_image_override is defined
- - openshift_crio_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_crio_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_crio_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
@@ -112,7 +48,6 @@
environment:
NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
- name: Install CRI-O System Container
oc_atomic_container:
name: "cri-o"
@@ -154,10 +89,8 @@
daemon_reload: yes
register: start_result
-- meta: flush_handlers
-
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 84217e50c..10570fe34 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -11,32 +11,9 @@
traditional docker package install. Otherwise, comment out openshift_docker_options
in your inventory file.
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/pre.yml
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/syscontainer_packages.yml
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
@@ -59,48 +36,11 @@
delay: 30
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set to default prepend
- set_fact:
- l_docker_image_prepend: "gscrivano"
- l_docker_image_tag: "latest"
-
- - name: Set container engine image tag
- set_fact:
- l_docker_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use Red Hat Registry for image when distribution is Red Hat
- set_fact:
- l_docker_image_prepend: "registry.access.redhat.com/openshift3"
- when: ansible_distribution == 'RedHat'
-
- - name: Use Fedora Registry for image when distribution is Fedora
- set_fact:
- l_docker_image_prepend: "registry.fedoraproject.org/f25"
- when: ansible_distribution == 'Fedora'
-
- - name: Set the full image name
- set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
-
- # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- - name: Use a specific image if requested
- set_fact:
- l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
- when:
- - openshift_docker_systemcontainer_image_override is defined
- - openshift_docker_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_docker_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_docker_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
@@ -154,10 +94,8 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- meta: flush_handlers
-
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..0a1ff2e0a 100644
--- a/roles/container_runtime/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
@@ -27,7 +27,7 @@ storage_option = [
[crio.api]
# listen is the path to the AF_LOCAL socket on which crio will listen.
-listen = "/var/run/crio.sock"
+listen = "/var/run/crio/crio.sock"
# stream_address is the IP address on which the stream server will listen
stream_address = ""
diff --git a/roles/container_runtime/templates/daemon.json b/roles/container_runtime/templates/daemon.json
index 383963bd3..1a72d812a 100644
--- a/roles/container_runtime/templates/daemon.json
+++ b/roles/container_runtime/templates/daemon.json
@@ -5,10 +5,10 @@
"disable-legacy-registry": false,
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": {{ l_docker_insecure_registries }},
-{% if openshift_docker_log_driver is defined %}
+{% if openshift_docker_log_driver %}
"log-driver": "{{ openshift_docker_log_driver }}",
{%- endif %}
- "log-opts": {{ l_docker_log_options }},
+ "log-opts": {{ l_docker_log_options_dict }},
"runtimes": {
"oci": {
"path": "/usr/libexec/docker/docker-runc-current"
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index a2c2f98a7..52b9d09dd 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -21,7 +21,7 @@ dependencies:
etcd_client_port: 22379
etcd_conf_dir: /etc/contiv-etcd/
etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ inventory_hostname }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_config_dir: /etc/contiv-etcd/
etcd_url_scheme: http
etcd_peer_url_scheme: http
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index f679443e0..8a928ea54 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -8,51 +8,64 @@
- name: Contiv | Set globals
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+ run_once: true
- name: Contiv | Set arp mode to flood if ACI
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
+ run_once: true
- name: Contiv | Create default-net
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+ run_once: true
- name: Contiv | Create host access infra network for VxLan routing case
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+ run_once: true
#- name: Contiv | Create an allow-all policy for the default-group
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Set up aci external contract to consume default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Set up aci external contract to provide default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Create aci default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Add external contracts to the default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
#- name: Contiv | Add policy rule 1 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
+# run_once: true
#- name: Contiv | Add policy rule 2 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Create default aci app profile
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
+ run_once: true
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 07bb16ea7..c98e7b6a5 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -13,9 +13,15 @@
- name: Netmaster IPtables | Open Netmaster with iptables
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
with_items:
- - "{{ netmaster_port }}"
- "{{ contiv_rpc_port1 }}"
- "{{ contiv_rpc_port2 }}"
- "{{ contiv_rpc_port3 }}"
when: iptablesrules.stdout.find("contiv") == -1
notify: Save iptables rules
+
+- name: Netmaster IPtables | Open netmaster main port
+ command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 62b4716a3..a4d260279 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -27,7 +27,7 @@
- name: PkgMgr RHEL/CentOS | Install ovs
yum:
- pkg=openvswitch-2.5.0-2.el7.x86_64
+ pkg=openvswitch
state=present
environment:
http_proxy: "{{ http_proxy|default('') }}"
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 90bb98001..9b3f12567 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift
ExecStart={{ bin_dir }}/aci_gw.sh start
ExecStop={{ bin_dir }}/aci_gw.sh stop
KillMode=control-group
-Restart=on-failure
+Restart=always
RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index a602c955e..ce7d0c75e 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
EnvironmentFile=/etc/default/netmaster
ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
KillMode=control-group
-Restart=on-failure
+Restart=always
RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
index dc7b95bb5..6358d89ec 100644
--- a/roles/contiv/templates/netplugin.service
+++ b/roles/contiv/templates/netplugin.service
@@ -6,3 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
EnvironmentFile=/etc/default/netplugin
ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
KillMode=control-group
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index 07401a6dd..d12436f96 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -6,10 +6,17 @@
failed_when: false
check_mode: no
+- name: RPM | Determine if firewalld enabled
+ command: "systemctl status firewalld.service"
+ register: ss
+ changed_when: false
+ failed_when: false
+ check_mode: no
+
- name: Set the has_firewalld fact
set_fact:
has_firewalld: true
- when: s.rc == 0
+ when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
command: "rpm -q iptables-services"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 3038ed9f6..86cea5c46 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -10,7 +10,7 @@ r_etcd_common_embedded_etcd: false
osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
etcd_image_dict:
- origin: "registry.fedoraproject.org/f26/etcd"
+ origin: "registry.fedoraproject.org/latest/etcd"
openshift-enterprise: "{{ osm_etcd_image }}"
etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 879ca4f4e..f2e1fc310 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -19,3 +19,4 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 82ac4fc84..ca8b6a707 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,9 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 488b6b0bc..2e4a0dc39 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,8 +2,8 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 80e4d391d..705d39f9a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -15,7 +15,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
until: not l_restart_node_result | failed
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 1d0f5df6a..cd11fd9ff 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
-etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index ffe814713..08f2d5adc 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -36,7 +36,7 @@
- name: Configure OpenShift node with disabled service proxy
lineinfile:
- dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
@@ -44,5 +44,5 @@
- name: force node restart to disable the proxy
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py
new file mode 100644
index 000000000..615021ac5
--- /dev/null
+++ b/roles/lib_utils/library/oo_ec2_group.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: skip-file
+# flake8: noqa
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+ - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+ Rule sources list support was added in version 2.4. This allows to define multiple sources per
+ source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
+ required: false
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4.
+ required: false
+ version_added: "1.6"
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules
+ required: false
+ default: 'true'
+ aliases: []
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress
+ required: false
+ default: 'true'
+ aliases: []
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ choices: [ 'yes', 'no' ]
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+'''
+
+EXAMPLES = '''
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+
+- name: "Delete group by its id"
+ ec2_group:
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: string
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: string
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: string
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: string
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info
+from ansible.module_utils.ec2 import ec2_argument_spec
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import HAS_BOTO3
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+from ansible.module_utils.ec2 import AWSRetry
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def make_rule_key(prefix, rule, group_id, cidr_ip):
+ if 'proto' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+ elif 'IpProtocol' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
+ if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+ from_port = 'none'
+ to_port = 'none'
+ key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
+ return key.lower().replace('-none', '-None')
+
+
+def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
+ for rule in ipPermissions:
+ for groupGrant in rule.get('UserIdGroupPairs', []):
+ dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
+ for ipv4Grants in rule.get('IpRanges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
+ for ipv6Grants in rule.get('Ipv6Ranges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (group_id, ip) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+    function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+
+ FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+ group_id = None
+ group_name = None
+ ip = None
+ ipv6 = None
+ target_group_created = False
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg="Specify group_id OR group_name, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and "
+ "no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ auto_group = client.create_security_group(**params)
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+ elif 'cidr_ipv6' in rule:
+ ipv6 = rule['cidr_ipv6']
+
+ return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, str):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((port.strip(),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ if 'ports' not in rule:
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = from_to
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def authorize_ip(type, changed, client, group, groupRules,
+ ip, ip_permission, module, rule, ethertype):
+ # If rule already exists, don't later delete it
+ for thisip in ip:
+ rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_ip_grant(rule, thisip, ethertype)
+ if ip_permission:
+ try:
+ if type == "in":
+ client.authorize_security_group_ingress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ elif type == "out":
+ client.authorize_security_group_egress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
+ (type, thisip, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ return changed, ip_permission
+
+
+def serialize_group_grant(group_id, rule):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port'],
+ 'UserIdGroupPairs': [{'GroupId': group_id}]}
+
+ return fix_port_and_protocol(permission)
+
+
+def serialize_revoke(grant, rule):
+ permission = dict()
+ fromPort = rule['FromPort'] if 'FromPort' in rule else None
+ toPort = rule['ToPort'] if 'ToPort' in rule else None
+ if 'GroupId' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
+ }
+ elif 'CidrIp' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'IpRanges': [grant]
+ }
+ elif 'CidrIpv6' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'Ipv6Ranges': [grant]
+ }
+ return fix_port_and_protocol(permission)
+
+
+def serialize_ip_grant(rule, thisip, ethertype):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port']}
+ if ethertype == "ipv4":
+ permission['IpRanges'] = [{'CidrIp': thisip}]
+ elif ethertype == "ipv6":
+ permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
+
+ return fix_port_and_protocol(permission)
+
+
+def fix_port_and_protocol(permission):
+ for key in ['FromPort', 'ToPort']:
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = str(permission['IpProtocol'])
+
+ return permission
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="The AWS region must be specified as an "
+ "environment variable or in the AWS credentials "
+ "profile.")
+ client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
+ group = None
+ groups = dict()
+ security_groups = []
+ # Get all security groups and determine whether the requested group is present
+ try:
+ response = get_security_groups_with_backoff(client)
+ security_groups = response.get('SecurityGroups', [])
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ for sg in security_groups:
+ groups[sg['GroupId']] = sg
+ groupName = sg['GroupName']
+ if groupName in groups:
+ # Prioritise groups from the current VPC
+ # even if current VPC is EC2-Classic
+ if groups[groupName].get('VpcId') == vpc_id:
+ # Group saved already matches current VPC, change nothing
+ pass
+ elif vpc_id is None and groups[groupName].get('VpcId') is None:
+ # We're in EC2 classic, and the group already saved is as well
+ # No VPC groups can be used alongside EC2 classic groups
+ pass
+ else:
+ # the current SG stored has no direct match, so we can replace it
+ groups[groupName] = sg
+ else:
+ groups[groupName] = sg
+
+ if group_id and sg['GroupId'] == group_id:
+ group = sg
+ elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
+ group = sg
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+
+ # if the group doesn't exist, create it now
+ else:
+ # no match found, create it
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ group = client.create_security_group(**params)
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group to get an updated object.
+ # Amazon sometimes takes a couple of seconds to update the security group,
+ # so for VPC groups we wait until the default egress rule shows up.
+ while True:
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+
+ changed = True
+
+ if tags is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ else:
+ module.fail_json(msg="Unsupported state requested: %s" % state)
+
+ # create a lookup for all existing rules on the group
+ ip_permission = []
+ if group:
+ # Manage ingress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules is not None:
+ for rule in rules:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize ingress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if ip and not isinstance(ip, list):
+ ip = [ip]
+
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
+ module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
+ module, rule, "ipv6")
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules:
+ for (rule, grant) in groupRules.values():
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to revoke ingress for security group '%s' - %s" %
+ (group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Manage egress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules_egress is not None:
+ for rule in rules_egress:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize egress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if not isinstance(ip, list):
+ ip = [ip]
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
+ ip_permission, module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
+ ip_permission, module, rule, "ipv6")
+ elif vpc_id is not None:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
+ if default_egress_rule not in groupRules:
+ if not module.check_mode:
+ ip_permission = [{'IpProtocol': '-1',
+ 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
+ }
+ ]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
+ ('0.0.0.0/0',
+ group['GroupName'],
+ e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ else:
+ # make sure the default egress rule is not removed
+ del groupRules[default_egress_rule]
+
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules_egress and vpc_id is not None:
+ for (rule, grant) in groupRules.values():
+ # we shouldn't be revoking 0.0.0.0 egress
+ if grant != '0.0.0.0/0':
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
+ (grant, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ if group:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
+ tag_name_key_name='key', tag_value_key_name='value')
+ module.exit_json(changed=changed, **security_group)
+ else:
+ module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index cb83c8ead..7b55dda56 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
(not master_api_service_status_changed | default(false))
@@ -8,7 +8,7 @@
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index e68ae74bd..ede6f2125 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
+ systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index fdf01b7c2..88d62de49 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
-openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node
nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
deleted file mode 100644
index 8c10c9991..000000000
--- a/roles/openshift_atomic/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-OpenShift Atomic
-================
-
-This role houses atomic specific tasks.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-```
-- name: Ensure atomic proxies are defined
- hosts: localhost
- roles:
- - role: openshift_atomic
-```
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
deleted file mode 100644
index ea129f514..000000000
--- a/roles/openshift_atomic/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Atomic related tasks
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_openshift
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 42ef22846..74e5d1dde 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_current_version: ''
-openshift_aws_new_version: ''
-
+openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
@@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
@@ -34,14 +31,12 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
openshift_aws_ami_tags:
bootstrap: "true"
openshift-created: "true"
- clusterid: "{{ openshift_aws_clusterid }}"
parent: "{{ openshift_aws_base_ami | default('unknown') }}"
openshift_aws_s3_mode: create
@@ -124,6 +119,20 @@ openshift_aws_ami_map:
infra: "{{ openshift_aws_ami }}"
compute: "{{ openshift_aws_ami }}"
+openshift_aws_master_group:
+- name: "{{ openshift_aws_clusterid }} master group"
+ group: master
+
+openshift_aws_node_groups:
+- name: "{{ openshift_aws_clusterid }} compute group"
+ group: compute
+- name: "{{ openshift_aws_clusterid }} infra group"
+ group: infra
+
+openshift_aws_created_asgs: []
+openshift_aws_current_asgs: []
+
+# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
@@ -139,7 +148,6 @@ openshift_aws_master_group_config:
host-type: master
sub-host-type: default
runtime: docker
- version: "{{ openshift_aws_new_version }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -163,7 +171,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: compute
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -183,7 +190,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: infra
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -283,21 +289,4 @@ openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
-openshift_aws_node_groups: nodes
-
openshift_aws_masters_groups: masters,etcd,nodes
-
-# If creating extra node groups, you'll need to define all of the following
-
-# The format is the same as openshift_aws_node_group_config, but the top-level
-# key names should be different (ie, not == master or infra).
-# openshift_aws_node_group_config_extra: {}
-
-# This variable should look like openshift_aws_launch_config_security_groups
-# and contain a one-to-one mapping of top level keys that are defined in
-# openshift_aws_node_group_config_extra.
-# openshift_aws_launch_config_security_groups_extra: {}
-
-# openshift_aws_node_security_groups_extra: {}
-
-# openshift_aws_ami_map_extra: {}
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index e707abd3f..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -4,11 +4,43 @@
Custom filters for use in openshift_aws
'''
+from ansible import errors
+
class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
+ def scale_groups_serial(scale_group_info, upgrade=False):
+ ''' This function will determine what the deployment serial should be and return it
+
+ Search through the tags and find the deployment_serial tag. Once found,
+ determine if an increment is needed during an upgrade.
+ If upgrade is true, increment the serial and return it; otherwise return the serial unchanged.
+ '''
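+ # e.g. (illustrative only): with tags [{'key': 'deployment_serial', 'value': '3'}] on the
+ # first scale group, this returns 3 when upgrade=False and 4 when upgrade=True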
+ if scale_group_info == []:
+ return 1
+
+ scale_group_info = scale_group_info[0]
+
+ if not isinstance(scale_group_info, dict):
+ raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
+
+ serial = None
+
+ for tag in scale_group_info['tags']:
+ if tag['key'] == 'deployment_serial':
+ serial = int(tag['value'])
+ if upgrade:
+ serial += 1
+ break
+ else:
+ raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
+
+ return serial
+
+ @staticmethod
def scale_groups_match_capacity(scale_group_info):
''' This function will verify that the scale group instance count matches
the scale group desired capacity
@@ -38,4 +70,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity}
+ 'scale_groups_match_capacity': self.scale_groups_match_capacity,
+ 'scale_groups_serial': self.scale_groups_serial}
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index ae320962f..c2a2cea30 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,6 +1,6 @@
---
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -11,7 +11,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -22,9 +22,14 @@
delay: 3
until: "'instances' in instancesout and instancesout.instances|length > 0"
-- debug:
+- name: Dump the private dns names
+ debug:
msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+- name: Dump the master public ip address
+ debug:
+ msg: "{{ mastersout.instances[0].public_ip_address }}"
+
- name: approve nodes
oc_adm_csr:
#approve_all: True
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 2c1e88cfb..7fb617dd5 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,6 +1,4 @@
---
-# This task file expects l_nodes_to_build to be passed in.
-
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -26,6 +24,35 @@
# Need to set epoch time in one place to use for launch_config and scale_group
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+#
+# Query the ASGs for this cluster and determine whether new ones need to be created.
+# If more than one ASG matches a node group, fail.
+- name: query all asg's for this cluster
+ ec2_asg_facts:
+ region: "{{ openshift_aws_region }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ register: asgs
+
+- fail:
+ msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}"
+ when:
+ - asgs.results|length > 1
+
+- debug:
+ msg: "{{ asgs }}"
+
+- name: set the value for the deployment_serial and the current asgs
+ set_fact:
+ l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+ openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
+
+- name: dump deployment serial
+ debug:
+ msg: "Deployment serial: {{ l_deployment_serial }}"
+
+- name: dump current_asgs
+ debug:
+ msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}"
- when: openshift_aws_create_iam_role
include_tasks: iam_role.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
index d9910d938..cf3bb28fb 100644
--- a/roles/openshift_aws/tasks/iam_role.yml
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -13,11 +13,10 @@
#####
- name: Create an iam role
iam_role:
- name: "{{ item.value.iam_role }}"
+ name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
#####
# The second part of this task file is linking the role to a policy
@@ -28,9 +27,8 @@
- name: create an iam policy
iam_policy:
iam_type: role
- iam_name: "{{ item.value.iam_role }}"
- policy_json: "{{ item.value.policy_json }}"
- policy_name: "{{ item.value.policy_name }}"
+ iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+ policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
+ policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index fed80b7eb..6f78ee00a 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -1,15 +1,26 @@
---
-- fail:
- msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
- when:
- - openshift_aws_ami is undefined
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
-- fail:
- msg: "Ensure that openshift_deployment_type is defined."
- when:
- - openshift_deployment_type is undefined
-
-- include_tasks: launch_config_create.yml
- with_dict: "{{ l_nodes_to_build }}"
- loop_control:
- loop_var: launch_config_item
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
+ instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+ l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
deleted file mode 100644
index f7f0f0953..000000000
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
- instance_type: "{{ launch_config_item.value.instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
- launch_config_item.value.iam_role != '' and
- openshift_aws_create_iam_role
- else omit }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ launch_config_item.value.volumes }}"
- assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 06f649343..786a2e4cf 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -20,13 +20,14 @@
- name: include scale group creation for master
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_master_group }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
+ l_node_group_config: "{{ openshift_aws_master_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 8cc75cd0c..696b323c0 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -27,7 +27,7 @@
Name: "{{ openshift_aws_base_ami_name }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 041ed0791..d82f18574 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -3,7 +3,7 @@
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
- name: fetch master instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -31,20 +31,15 @@
- name: include build compute and infra node groups
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_node_groups }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-
-- name: include build node group for extra nodes
- include_tasks: build_node_group.yml
- when: openshift_aws_node_group_config_extra is defined
- vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+ l_node_group_config: "{{ openshift_aws_node_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
include_tasks: wait_for_groups.yml
+ vars:
+ created_asgs: "{{ openshift_aws_created_asgs }}"
diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml
index 55d1af2b5..a01cde294 100644
--- a/roles/openshift_aws/tasks/remove_scale_group.yml
+++ b/roles/openshift_aws/tasks/remove_scale_group.yml
@@ -1,10 +1,13 @@
---
+# TODO: figure out how to remove scale groups (possibly via openshift_aws_current_asgs)
- name: fetch the scale groups
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
+ name: "^{{ item }}$"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'version': openshift_aws_current_version} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}"
register: qasg
- name: remove non-master scale groups
@@ -14,7 +17,7 @@
name: "{{ item.auto_scaling_group_name }}"
when: "'master' not in item.auto_scaling_group_name"
register: asg_results
- with_items: "{{ qasg.results }}"
+ with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"
async: 600
poll: 0
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
index 9cf37c840..ba70bcff6 100644
--- a/roles/openshift_aws/tasks/s3.yml
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -1,6 +1,6 @@
---
- name: Create an s3 bucket
- s3:
+ aws_s3:
bucket: "{{ openshift_aws_s3_bucket_name }}"
mode: "{{ openshift_aws_s3_mode }}"
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 30df7545d..3632f7ce9 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,20 +1,30 @@
---
+- name: set node group name
+ set_fact:
+ l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
+
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
- launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- health_check_period: "{{ item.value.health_check.period }}"
- health_check_type: "{{ item.value.health_check.type }}"
- min_size: "{{ item.value.min_size }}"
- max_size: "{{ item.value.max_size }}"
- desired_capacity: "{{ item.value.desired_size }}"
+ name: "{{ l_node_group_name }}"
+ launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}"
+ health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}"
+ min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}"
+ max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}"
+ desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
- load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
- wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
+ termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != []
+ else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
- with_dict: "{{ l_nodes_to_build }}"
+ - "{{ openshift_aws_node_group_config_tags
+ | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
+
+- name: append the asg name to the openshift_aws_created_asgs fact
+ set_fact:
+ openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 7a3d0fb68..74877d5c7 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,6 +1,6 @@
---
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -12,7 +12,7 @@
- name: bundle ami
ec2_ami:
- instance_id: "{{ instancesout.instances.0.id }}"
+ instance_id: "{{ instancesout.instances.0.instance_id }}"
region: "{{ openshift_aws_region }}"
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
@@ -46,4 +46,4 @@
ec2:
state: absent
region: "{{ openshift_aws_region }}"
- instance_ids: "{{ instancesout.instances.0.id }}"
+ instance_ids: "{{ instancesout.instances.0.instance_id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 43834079e..0568a0190 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,27 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include_tasks: security_group_create.yml
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups }}"
+- name: create the node group sgs
+ oo_ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ oo_ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+ register: k8s_sg_create
-- include_tasks: security_group_create.yml
- when: openshift_aws_node_security_groups_extra is defined
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
deleted file mode 100644
index ef6060555..000000000
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.value.name}}"
- description: "{{ item.value.desc }}"
- rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.value.name }}_k8s"
- description: "{{ item.value.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
- register: k8s_sg_create
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags: "{{ openshift_aws_security_groups_tags }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
index 05b68f460..700917ef4 100644
--- a/roles/openshift_aws/tasks/setup_master_group.yml
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -8,7 +8,7 @@
msg: "openshift_aws_region={{ openshift_aws_region }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -19,11 +19,13 @@
delay: 3
until: instancesout.instances|length > 0
+- debug: var=instancesout
+
- name: add new master to masters group
add_host:
groups: "{{ openshift_aws_masters_groups }}"
name: "{{ item.public_dns_name }}"
- hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
with_items: "{{ instancesout.instances }}"
- name: wait for ssh to become available
diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
index d65fdc2de..14c5246c9 100644
--- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml
+++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
@@ -1,11 +1,15 @@
---
-- name: group scale group nodes
- ec2_remote_facts:
+- name: fetch all created instances
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
- "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}"
+ "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
+ 'instance-state-name': 'running'} }}"
register: qinstances
+# Building the new and current node groups depends on having the lists of created and current ASGs
+# in the variables openshift_aws_created_asgs and openshift_aws_current_asgs. If these are not set,
+# we cannot determine which hosts are new and which are current.
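+# Illustrative values only (ASG names are built from the cluster id, group name and deployment serial),
+# e.g. openshift_aws_created_asgs: ['mycluster compute group 2']
+# and openshift_aws_current_asgs: ['mycluster compute group 1'].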
- name: Build new node group
add_host:
groups: oo_sg_new_nodes
@@ -13,10 +17,16 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default(False)) == openshift_aws_new_version
+ - openshift_aws_created_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
+- name: dump openshift_aws_current_asgs
+ debug:
+ msg: "{{ openshift_aws_current_asgs }}"
+
- name: Build current node group
add_host:
groups: oo_sg_current_nodes
@@ -24,7 +34,9 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default('')) == openshift_aws_current_version
+ - openshift_aws_current_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index c3f86f523..4f4730dd6 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -1,12 +1,22 @@
---
-- fail:
- msg: 'Please ensure the current_version and new_version variables are not the same.'
+- include_tasks: provision_nodes.yml
+ vars:
+ openshift_aws_node_group_upgrade: True
when:
- - openshift_aws_current_version == openshift_aws_new_version
+ - openshift_aws_upgrade_provision_nodes | default(True)
-- include_tasks: provision_nodes.yml
+- debug: var=openshift_aws_current_asgs
+- debug: var=openshift_aws_created_asgs
+
+- name: fail if asg variables aren't set
+ fail:
+ msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined."
+ when:
+ - openshift_aws_created_asgs == []
+ - openshift_aws_current_asgs == []
- include_tasks: accept_nodes.yml
+ when: openshift_aws_upgrade_accept_nodes | default(True)
- include_tasks: setup_scale_group_facts.yml
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 9f1a68a2a..1f4ef3e1c 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -1,31 +1,41 @@
---
# The idea here is to wait until all scale groups are at
# their desired capacity before continuing.
-- name: fetch the scale groups
+# This is accomplished with a custom filter plugin and an until clause.
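+# The scale_groups_match_capacity filter reports whether the groups have reached their desired
+# capacity, so the until clause below keeps retrying every 10s for up to 60 attempts.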
+- name: "fetch the scale groups"
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
- until: qasg.results | scale_groups_match_capacity | bool
+ until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
+- debug: var=openshift_aws_created_asgs
+
+# How do we guarantee the instances are up?
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'tag:version': openshift_aws_new_version} }}"
+ 'tag:aws:autoscaling:groupName': item,
+ 'instance-state-name': 'running'} }}"
+ with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}"
register: instancesout
until: instancesout.instances|length > 0
delay: 5
retries: 60
+- name: dump instances
+ debug:
+ msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
+
- name: wait for ssh to become available
wait_for:
port: 22
host: "{{ item.public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index fe0fe83d4..bda1334cd 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,8 +7,8 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ launch_config_item.key }}
-{% if launch_config_item.key != 'master' %}
+ openshift_group_type: {{ openshift_aws_node_group.group }}
+{% if openshift_aws_node_group.group != 'master' %}
- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if launch_config_item.key != 'master' %}
+{% if openshift_aws_node_group.group != 'master' %}
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index e0b51eee0..612b6522d 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -4,11 +4,6 @@
role: builddefaults
# TODO: add ability to define builddefaults env vars sort of like this
# may need to move the config generation to a filter however.
- # openshift_env: "{{ hostvars
- # | oo_merge_hostvars(vars, inventory_hostname)
- # | oo_openshift_env }}"
- # openshift_env_structures:
- # - 'openshift.builddefaults.env.*'
local_facts:
http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index f8b784a63..81b49ce60 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
+- role: openshift_facts
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 05e0a1352..eb00f13db 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,7 +9,7 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 140c6ea26..888aa8f0c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift.common.service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 1d5fba990..68a0e8857 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.4.6
+XPAAS_VERSION=ose-v1.4.7
ORIGIN_VERSION=${1:-v3.7}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
new file mode 120000
index 000000000..8cad94b63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/latest
@@ -0,0 +1 @@
+v3.9 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8
deleted file mode 120000
index 8ddcf661c..000000000
--- a/roles/openshift_examples/files/examples/v3.7/v3.8
+++ /dev/null
@@ -1 +0,0 @@
-v3.8 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
index e7af160d9..0aa586061 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
index 2b082fc75..7ab9f4e59 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
index 3771280bf..d7cab4889 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..6186bba10 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..59dc0d22b 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest
deleted file mode 120000
index b9bc2fdcb..000000000
--- a/roles/openshift_examples/files/examples/v3.9/latest
+++ /dev/null
@@ -1 +0,0 @@
-latest \ No newline at end of file
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 80cb88d45..7b43d5adf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -28,7 +28,7 @@ Role Variables
| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
-| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift |
| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
@@ -46,15 +46,12 @@ Example Playbook
# Disable all excluders
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Enable all excluders
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Disable all excluders and verify appropriate excluder packages are available for upgrade
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index d4f151142..3a910e490 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -2,7 +2,7 @@
# keep the 'current' package or update to 'latest' if available?
r_openshift_excluder_package_state: present
r_openshift_excluder_docker_package_state: present
-
+r_openshift_excluder_service_type: "{{ openshift_service_type }}"
# Legacy variables are included for backwards compatibility with v3.5
# Inventory variables Legacy
# openshift_enable_excluders enable_excluders
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 871081c19..a9653edda 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_facts
- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index 93d6ef149..f0e87ba25 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -19,11 +19,6 @@
msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
when: r_openshift_excluder_action not in ['enable', 'disable']
- - name: Fail if r_openshift_excluder_service_type is not defined
- fail:
- msg: "r_openshift_excluder_service_type must be specified for this role"
- when: r_openshift_excluder_service_type is not defined
-
- name: Fail if r_openshift_excluder_upgrade_target is not defined
fail:
msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index a182d23c5..53a3bc87e 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -98,3 +98,9 @@ openshift_prometheus_alertbuffer_storage_create_pvc: False
openshift_router_selector: "region=infra"
openshift_hosted_router_selector: "{{ openshift_router_selector }}"
openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index b371d347c..a10ba9310 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -94,8 +94,7 @@ def migrate_admission_plugin_facts(facts):
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
@@ -459,7 +458,6 @@ def set_url_facts_if_unset(facts):
etcd_urls = []
if etcd_hosts != '':
facts['master']['etcd_port'] = ports['etcd']
- facts['master']['embedded_etcd'] = False
for host in etcd_hosts:
etcd_urls.append(format_url(use_ssl['etcd'], host,
ports['etcd']))
@@ -538,7 +536,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes common.service_type, master.registry_url, node.registry_url,
+ includes master.registry_url, node.registry_url,
node.storage_plugin_deps
Args:
@@ -550,14 +548,6 @@ def set_deployment_facts_if_unset(facts):
# disabled to avoid breaking up facts related to deployment type into
# multiple methods for now.
# pylint: disable=too-many-statements, too-many-branches
- if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'service_type' not in facts['common']:
- service_type = 'atomic-openshift'
- if deployment_type == 'origin':
- service_type = 'origin'
- facts['common']['service_type'] = service_type
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
@@ -854,7 +844,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
# If we've added items to the kubelet_args dict then we need
# to merge the new items back into the main facts object.
if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
+ facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
return facts
@@ -876,7 +866,7 @@ def build_controller_args(facts):
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
- facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
+ facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
@@ -898,7 +888,7 @@ def build_api_server_args(facts):
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
- facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
+ facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
@@ -1021,8 +1011,13 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
+ deployment_type = facts['common']['deployment_type']
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
+
for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
- env_path = filename % facts['common']['service_type']
+ env_path = filename % service_type
if not os.path.exists(env_path):
continue
@@ -1085,7 +1080,7 @@ def apply_provider_facts(facts, provider_facts):
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
-def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
@@ -1093,14 +1088,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
- protected_facts = ['ha']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
@@ -1132,14 +1124,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
- # Collect the subset of protected facts to overwrite
- # if key matches. These will be passed to the
- # subsequent merge_facts call.
- relevant_protected_facts = []
- for item in protected_facts_to_overwrite:
- if '.' in item and item.startswith(key + '.'):
- relevant_protected_facts.append(item)
- facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
@@ -1149,18 +1134,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
- # Key matches a protected fact and we are not overwriting
- # it so we will determine if it is okay to change this
- # fact.
- elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
- # ha (bool) can not change unless it has been passed
- # as a protected fact to overwrite.
- if key == 'ha':
- if safe_get_bool(value) != safe_get_bool(new[key]):
- # pylint: disable=line-too-long
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
- else:
- facts[key] = value
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
@@ -1433,7 +1406,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1494,8 +1466,6 @@ class OpenShiftFacts(object):
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Raises:
OpenShiftFactsUnsupportedRoleError:
@@ -1511,10 +1481,7 @@ class OpenShiftFacts(object):
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
def __init__(self, role, filename, local_facts,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -1536,34 +1503,23 @@ class OpenShiftFacts(object):
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
def generate_facts(self,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite):
+ additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated defaults
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift_env facts for overriding generated defaults
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: The generated facts
"""
+
local_facts = self.init_local_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
roles = local_facts.keys()
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
@@ -1581,8 +1537,7 @@ class OpenShiftFacts(object):
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
@@ -1627,7 +1582,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1639,7 +1594,7 @@ class OpenShiftFacts(object):
console_port='8443', etcd_use_ssl=True,
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
- embedded_etcd=True, embedded_kube=True,
+ embedded_kube=True,
embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
@@ -1732,65 +1687,17 @@ class OpenShiftFacts(object):
)
return provider_facts
- @staticmethod
- def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
- """ Split openshift_env facts based on openshift_env structures.
-
- Args:
- openshift_env_fact (string): the openshift_env fact to split
- ex: 'openshift_cloudprovider_openstack_auth_url'
- openshift_env_structures (list): a list of structures to determine fact keys
- ex: ['openshift.cloudprovider.openstack.*']
- Returns:
- list: a list of keys that represent the fact
- ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
- """
- # By default, we'll split an openshift_env fact by underscores.
- fact_keys = openshift_env_fact.split('_')
-
- # Determine if any of the provided variable structures match the fact.
- matching_structure = None
- if openshift_env_structures is not None:
- for structure in openshift_env_structures:
- if re.match(structure, openshift_env_fact):
- matching_structure = structure
- # Fact didn't match any variable structures so return the default fact keys.
- if matching_structure is None:
- return fact_keys
-
- final_keys = []
- structure_keys = matching_structure.split('.')
- for structure_key in structure_keys:
- # Matched current key. Add to final keys.
- if structure_key == fact_keys[structure_keys.index(structure_key)]:
- final_keys.append(structure_key)
- # Wildcard means we will be taking everything from here to the end of the fact.
- elif structure_key == '*':
- final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
- # Shouldn't have gotten here, return the fact keys.
- else:
- return fact_keys
- return final_keys
-
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
""" Initialize the local facts
Args:
facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift env facts to set
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
-
-
Returns:
dict: The result of merging the provided facts with existing
local facts
@@ -1802,45 +1709,13 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env is not None:
- for fact, value in iteritems(openshift_env):
- oo_env_facts = dict()
- current_level = oo_env_facts
- keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
-
- if len(keys) > 0 and keys[0] != self.role:
- continue
-
- # Build a dictionary from the split fact keys.
- # After this loop oo_env_facts is the resultant dictionary.
- # For example:
- # fact = "openshift_metrics_install_metrics"
- # value = 'true'
- # keys = ['metrics', 'install', 'metrics']
- # result = {'metrics': {'install': {'metrics': 'true'}}}
- for i, _ in enumerate(keys):
- # This is the last key. Set the value.
- if i == (len(keys) - 1):
- current_level[keys[i]] = value
- # This is a key other than the last key. Set as
- # dictionary and continue.
- else:
- current_level[keys[i]] = dict()
- current_level = current_level[keys[i]]
-
- facts_to_set = merge_facts(orig=facts_to_set,
- new=oo_env_facts,
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
-
local_facts = get_local_facts_from_file(self.filename)
migrated_facts = migrate_local_facts(local_facts)
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -1948,9 +1823,6 @@ def main():
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
- openshift_env=dict(default={}, type='dict', required=False),
- openshift_env_structures=dict(default=[], type='list', required=False),
- protected_facts_to_overwrite=dict(default=[], type='list', required=False)
),
supports_check_mode=True,
add_file_common_args=True,
@@ -1966,19 +1838,13 @@ def main():
role = module.params['role'] # noqa: F405
local_facts = module.params['local_facts'] # noqa: F405
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
- openshift_env = module.params['openshift_env'] # noqa: F405
- openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role,
fact_file,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
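With protected facts removed, `merge_facts()` is down to three arguments. A rough, self-contained sketch of the behaviour that remains (recurse into nested dicts, append to additive facts such as `named_certificates`, otherwise let the new value win); this is a simplification for reading the hunks above, not a drop-in replacement for the module code:
```
# Simplified sketch of the post-change merge_facts() contract.
ADDITIVE_FACTS = ['named_certificates']

def merge_facts(orig, new, additive_facts_to_overwrite):
    """Recursively merge `new` into `orig` (simplified sketch)."""
    facts = dict(orig)
    for key, value in new.items():
        if key in orig and isinstance(orig[key], dict) and isinstance(value, dict):
            # Pass down only the overwrite entries that belong to this key.
            relevant = [i for i in additive_facts_to_overwrite
                        if '.' in i and i.startswith(key + '.')]
            facts[key] = merge_facts(orig[key], value, relevant)
        elif (key in orig and key in ADDITIVE_FACTS
              and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]):
            # Additive fact: append new items instead of overwriting.
            merged = list(orig[key])
            merged.extend(item for item in value if item not in merged)
            facts[key] = merged
        else:
            # No special handling: the new value overwrites the old fact.
            facts[key] = value
    return facts

merged = merge_facts({'master': {'ha': True}}, {'master': {'api_port': '8443'}}, [])
assert merged == {'master': {'ha': True, 'api_port': '8443'}}
```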
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 6c5662a4e..94961f2d4 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -12,7 +12,7 @@ Checks are typically implemented as two parts:
The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
-[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
+[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)
for an example.
The action plugin dynamically discovers all checks and executes only those
diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml
new file mode 100644
index 000000000..f25a0dc79
--- /dev/null
+++ b/roles/openshift_health_checker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 090e438ff..980e23f27 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
group_names = self.get_var("group_names", default=[])
packages = set()
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 13a91dadf..f3a628e28 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
openshift_release = self.get_var("openshift_release", default='')
deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
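Both checks now read the package prefix from `openshift_service_type` instead of `openshift.common.service_type`, templating it when a Templar is available. A hedged sketch of that lookup, with `get_var` and `templar` standing in for the check's accessor and Ansible's Templar object:
```
def resolve_rpm_prefix(get_var, templar=None):
    """Return the package prefix used to build names like <prefix>-master."""
    rpm_prefix = get_var("openshift_service_type")
    if templar is not None:
        # The inventory value may still be a Jinja2 expression, e.g.
        # "{{ openshift_service_type_dict[openshift_deployment_type] }}".
        rpm_prefix = templar.template(rpm_prefix)
    return rpm_prefix

# Example with a plain callable instead of a real check object:
print(resolve_rpm_prefix(lambda name: "atomic-openshift"))  # -> atomic-openshift
```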
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index ec46c3b4b..fc333dfd4 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -8,12 +8,12 @@ def task_vars():
return dict(
openshift=dict(
common=dict(
- service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
+ openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index dd6f4ad81..a29dc166b 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
openshift=dict(
- common=dict(service_type="origin", is_containerized=False),
- )
+ common=dict(is_containerized=False),
+ ),
+ openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6f0457549..dd98ff4d8 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -10,10 +10,11 @@ def test_openshift_version_not_supported():
openshift_release = '111.7.0'
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -27,9 +28,10 @@ def test_invalid_openshift_release_format():
return {}
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_image_tag='v0',
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -47,9 +49,10 @@ def test_invalid_openshift_release_format():
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
+ openshift_service_type='origin'
)
return_value = {} # note: check.execute_module modifies return hash contents
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 9815acb38..a1e6e0879 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
- dict(openshift=dict(common=dict(service_type='openshift'))),
+ dict(openshift_service_type='origin'),
set(),
set(['openshift-master', 'openshift-node']),
),
(
dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-master']),
@@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
@@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 3cf4ce033..ea8e02b97 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
return dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type=deployment_type)),
+ openshift_service_type=service_type,
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type=deployment_type,
@@ -29,7 +32,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index a1c2c3956..3cf5d97c5 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -43,9 +43,9 @@ variables also control configuration behavior:
**NOTE:** Configuring a value for
`openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry`
-host group is not allowed. Specifying a `glusterfs_registry` host group
-indicates that a new GlusterFS cluster should be configured, whereas
-specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
+host group is not allowed. Specifying a `glusterfs_registry` host group
+indicates that a new GlusterFS cluster should be configured, whereas
+specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
to use a pre-configured GlusterFS cluster for the registry storage.
_
@@ -53,7 +53,6 @@ _
Dependencies
------------
-* openshift_hosted_facts
* openshift_persistent_volumes
Example Playbook
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 1d70ef7eb..ac9e241a5 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,6 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index de302c740..429f0c514 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -126,7 +126,7 @@
selector: "{{ openshift_hosted_registry_selector }}"
replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ penshift_hosted_registry_registryurl }}"
+ images: "{{ openshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
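The two new template pairs are identical for v3.8 and v3.9. A quick way to sanity-check the endpoints template locally, assuming plain Jinja2 renders this simple case the same way Ansible does; the endpoint name, variable names, and IPs below are made-up stand-ins for the role variables:
```
import jinja2

# Simplified copy of glusterfs-registry-endpoints.yml.j2 with short variable names.
ENDPOINTS_TEMPLATE = """\
apiVersion: v1
kind: Endpoints
metadata:
  name: {{ name }}
subsets:
- addresses:
{% for ip in ips %}
  - ip: {{ ip }}
{% endfor %}
  ports:
  - port: 1
"""

print(jinja2.Template(ENDPOINTS_TEMPLATE, trim_blocks=True).render(
    name="glusterfs-registry-endpoints",
    ips=["192.168.10.11", "192.168.10.12"]))
```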
diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml
deleted file mode 100644
index dd2de07bc..000000000
--- a/roles/openshift_hosted_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Andrew Butcher
- description: OpenShift Hosted Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index fcb4c94d3..59d6098d4 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,17 +1,20 @@
---
-- oc_obj:
- state: list
- kind: project
- name: "{{ item }}"
- with_items: "{{ __default_logging_ops_projects }}"
+- command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
- name: Annotate Operations Projects
oc_edit:
kind: ns
- name: "{{ item.item }}"
+ name: "{{ project }}"
separator: '#'
content:
metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
- with_items: "{{ __logging_ops_projects.results }}"
- when: item.results.stderr is not defined
+ with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
+ loop_control:
+ loop_var: project
+ when:
+ - __logging_ops_projects.stderr | length == 0
+ - openshift_logging_use_ops | default(false) | bool
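The rewritten task trades the `oc_obj` lookup for a single `oc get namespaces` call whose jsonpath output is split and looped over. A hedged Python sketch of the same flow (the binary and kubeconfig path follow the task above; `run_oc` and `ops_projects_to_annotate` are thin illustrative wrappers, not part of the role):
```
import shlex
import subprocess

def run_oc(client_binary, config_base, args):
    """Run an oc command with the cluster-admin kubeconfig and capture its output."""
    cmd = ([client_binary, "--config={}/master/admin.kubeconfig".format(config_base)]
           + shlex.split(args))
    return subprocess.run(cmd, capture_output=True, text=True)

def ops_projects_to_annotate(client_binary, config_base, default_ops_projects):
    result = run_oc(client_binary, config_base,
                    "get namespaces -o jsonpath={.items[*].metadata.name} "
                    + " ".join(default_ops_projects))
    if result.stderr:
        return []  # mirrors the `stderr | length == 0` guard in the task
    return result.stdout.split(" ")  # each project is then annotated via oc_edit
```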
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ffed956a4..af36d67c6 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -107,6 +107,24 @@
- logging-fluentd
- logging-mux
+# remove annotations added by logging
+- command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
+ register: __logging_ops_projects
+
+- name: Remove Annotation of Operations Projects
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ annotate {{ project }} openshift.io/logging.ui.hostname-
+ with_items: "{{ __logging_ops_projects.stdout_lines }}"
+ loop_control:
+ loop_var: project
+ when:
+ - __logging_ops_projects.stderr | length == 0
+
## EventRouter
- include_role:
name: openshift_logging_eventrouter
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 91db457d1..9949bb95d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -31,3 +31,4 @@
local_action: file path="{{local_tmp.stdout}}" state=absent
tags: logging_cleanup
changed_when: False
+ become: no
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0bfa9e85b..bf04094a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -9,6 +9,7 @@ metadata:
logging-infra: "{{logging_component}}"
spec:
replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
index e92a35f27..12b4f5bfd 100644
--- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -8,3 +8,4 @@
# wait half a second between labels
- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
+ become: no
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 96de82669..974d9781a 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -164,14 +164,14 @@ away.
If you want to install CFME/MIQ at the same time you install your
OCP/Origin cluster, ensure that `openshift_management_install_management` is set
to `true` in your inventory. Call the standard
-`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ
installation.
If you are installing CFME/MIQ on an *already provisioned cluster*
then you can call the CFME/MIQ playbook directly:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml
```
*Note: Use `miq-template` in the following examples for ManageIQ installs*
@@ -489,7 +489,7 @@ This playbook will:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml
```
## Multiple Providers
@@ -567,7 +567,7 @@ the config file path.
```
$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
- playbooks/byo/openshift-management/add_many_container_providers.yml
+ playbooks/openshift-management/add_many_container_providers.yml
```
Afterwards you will find two new container providers in your
@@ -579,7 +579,7 @@ to see an overview.
This role includes a playbook to uninstall and erase the CFME/MIQ
installation:
-* `playbooks/byo/openshift-management/uninstall.yml`
+* `playbooks/openshift-management/uninstall.yml`
NFS export definitions and data stored on NFS exports are not
automatically removed. You are urged to manually erase any data from
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index e768961ce..b5e234b7f 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
openshift_management_username: admin
openshift_management_password: smartvm
diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml
index 07ad51126..9f19704a8 100644
--- a/roles/openshift_management/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 38b2fd8b8..efd119299 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -54,6 +54,48 @@ ha_svc_template_path: "native-cluster"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_master_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates
+ | default(openshift_master_scheduler_default_predicates)) }}"
+ priorities: "{{ openshift_master_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities
+ | default(openshift_master_scheduler_default_priorities)) }}"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
+
+openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
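The `journald_vars_to_replace` list moves here from the now-deleted `vars/main.yml` (see below). For readers unfamiliar with the pattern, a hedged sketch of how such a var/val list is typically consumed; the file path and regexp are assumptions, not part of this diff:
```
# Hypothetical consumer of journald_vars_to_replace: rewrite each key in
# journald.conf, uncommenting it if necessary.
- name: Update journald settings
  lineinfile:
    dest: /etc/systemd/journald.conf
    regexp: '^#?\s*{{ item.var }}='
    line: '{{ item.var }}={{ item.val }}'
  with_items: "{{ journald_vars_to_replace }}"
```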
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e6b8b8ac8..557bfe022 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,7 +1,7 @@
---
- name: restart master api
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
@@ -10,7 +10,7 @@
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 5f4e6df71..9be5508aa 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -16,7 +16,7 @@
- name: Install Master package
package:
- name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- not openshift.common.is_containerized | bool
@@ -141,7 +141,7 @@
# The template file will stomp any other settings made.
- block:
- name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
failed_when: false
changed_when: false
register: l_already_set
@@ -203,7 +203,7 @@
- name: Start and enable master api on first master
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -214,7 +214,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -230,7 +230,7 @@
- name: Start and enable master api all masters
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -241,7 +241,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -258,7 +258,7 @@
- name: Start and enable master controller service
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
enabled: yes
state: started
register: l_start_result
@@ -267,7 +267,7 @@
delay: 60
- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- l_start_result | failed
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index ca04d2243..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 4f8b758fd..715347101 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -1,7 +1,7 @@
---
- name: Restart master API
service:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when: openshift_master_ha | bool
- name: Wait for master API to come back online
@@ -14,7 +14,7 @@
when: openshift_master_ha | bool
- name: Restart master controllers
service:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: restarted
# Ignore errors since it is possible that type != simple for
# pre-3.1.1 installations.
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 23386f11b..f6c5ce0dd 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull master system container image
command: >
@@ -12,12 +8,12 @@
- name: Check Master system container package
command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
+ atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
# HA
- name: Install or Update HA api master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
@@ -25,7 +21,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index ee76413e3..76b6f46aa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -13,7 +13,7 @@
- name: Disable the legacy master service if it exists
systemd:
- name: "{{ openshift.common.service_type }}-master"
+ name: "{{ openshift_service_type }}-master"
state: stopped
enabled: no
masked: yes
@@ -21,7 +21,7 @@
- name: Remove the legacy master service if it exists
file:
- path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service"
state: absent
ignore_errors: true
when:
@@ -40,7 +40,7 @@
- name: Create the ha systemd unit files
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
- dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+ dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service"
when:
- not l_is_master_system_container | bool
with_items:
@@ -55,7 +55,7 @@
- name: enable master services
systemd:
- name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ name: "{{ openshift_service_type }}-master-{{ item }}"
enabled: yes
with_items:
- api
@@ -64,13 +64,13 @@
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api
register: l_master_api_proxy
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api
register: master_api_aws
failed_when: false
changed_when: false
@@ -78,7 +78,7 @@
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
backup: true
notify:
- restart master api
@@ -89,7 +89,7 @@
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
@@ -98,19 +98,19 @@
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
no_log: True
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_proxy
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_aws
failed_when: false
changed_when: false
@@ -118,14 +118,14 @@
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
backup: true
notify:
- restart master controllers
- name: Restore Master Controllers Proxy Config Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
@@ -135,7 +135,7 @@
- name: Restore Master Controllers AWS Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index caab3045a..f50b91ff5 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
package: name={{ master_pkgs | join(',') }} state=present
vars:
master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result | success
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index cec3d3fb1..5e46d9121 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-api \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ --name {{ openshift_service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-api
+SyslogIdentifier={{ openshift_service_type }}-master-api
Restart=always
RestartSec=5s
[Install]
WantedBy={{ openshift_docker_service_name }}.service
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index a0248151d..899575f1a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -1,19 +1,19 @@
[Unit]
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
-Wants={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.common.service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
After={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-controllers \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ --name {{ openshift_service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 92668b227..f1a76e5f5 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -69,29 +69,13 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
- ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+ ca: master.etcd-ca.crt
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
{% endfor %}
-{% if openshift.master.embedded_etcd | bool %}
-etcdConfig:
- address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
- peerAddress: {{ openshift.common.hostname }}:7001
- peerServingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:7001
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- servingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
-{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
kubernetesStorageVersion: v1
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 02bfd6f62..ed8a47df8 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=network-online.target
After=etcd.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
@@ -20,4 +20,4 @@ RestartSec=5s
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index fae021845..b36963f73 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -2,19 +2,19 @@
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
After=network-online.target
-After={{ openshift.common.service_type }}-master-api.service
-Wants={{ openshift.common.service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 0c681c764..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
-loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
-openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
-
-scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_master_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates
- | default(openshift_master_scheduler_default_predicates)) }}"
- priorities: "{{ openshift_master_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities
- | default(openshift_master_scheduler_default_priorities)) }}"
-
-openshift_master_valid_grant_methods:
-- auto
-- prompt
-- deny
-
-openshift_master_is_scaleup_host: False
-
-# These defaults assume forcing journald persistence, fsync to disk once
-# a second, rate-limiting to 10,000 logs a second, no forwarding to
-# syslog or wall, using 8GB of disk space maximum, using 10MB journal
-# files, keeping only a days worth of logs per journal file, and
-# retaining journal files no longer than a month.
-journald_vars_to_replace:
-- { var: Storage, val: persistent }
-- { var: Compress, val: yes }
-- { var: SyncIntervalSec, val: 1s }
-- { var: RateLimitInterval, val: 1s }
-- { var: RateLimitBurst, val: 10000 }
-- { var: SystemMaxUse, val: 8G }
-- { var: SystemKeepFree, val: 20% }
-- { var: SystemMaxFileSize, val: 10M }
-- { var: MaxRetentionSec, val: 1month }
-- { var: MaxFileSec, val: 1day }
-- { var: ForwardToSyslog, val: no }
-- { var: ForwardToWall, val: no }
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 0cb87dcaa..418dcba67 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -45,7 +45,6 @@
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
- embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
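The hunk cuts off after `register: result`; the complete handler presumably finishes the retry loop with an `until` test, along the lines of this hedged reconstruction (only the `until` condition is an assumption):
```
# Hedged reconstruction of the full retry handler; the until line is an
# assumption, the rest mirrors the hunk above.
- name: restart master controllers
  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
  retries: 3
  delay: 5
  register: result
  until: result.rc == 0
```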
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes, but we don't re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, just follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
```
> If you are using a version older than 1.5/3.5 you must replace `--drain` with `--evacuate`.
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f3867fe4a..fff927944 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
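Replacing the ternary with a dict lookup makes an unknown `openshift_deployment_type` fail loudly instead of silently mapping to `atomic-openshift`. A quick local check of the mapping (hypothetical ad-hoc playbook, not part of this change):
```
# Hypothetical check: resolve the service type for a given deployment type.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_deployment_type: openshift-enterprise
    openshift_service_type_dict:
      origin: origin
      openshift-enterprise: atomic-openshift
  tasks:
    - debug:
        msg: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
```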
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 229c6bbed..1d9797f84 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -4,11 +4,15 @@
name: NetworkManager
state: restarted
enabled: True
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
- name: restart dnsmasq
systemd:
name: dnsmasq
state: restarted
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
- name: restart openvswitch
systemd:
@@ -34,7 +38,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
until: not l_openshift_node_restart_node_result | failed
@@ -47,3 +51,5 @@
- name: reload systemd units
command: systemctl daemon-reload
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
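These guards let an upgrade play reuse the node handlers without bouncing NetworkManager, dnsmasq, or systemd on the host. A hedged sketch of how a caller might opt out (the `tasks_from` target is illustrative):
```
# Hypothetical caller: include node tasks while suppressing the host-service
# handlers via skip_node_svc_handlers.
- include_role:
    name: openshift_node
    tasks_from: systemd_units
  vars:
    skip_node_svc_handlers: True
```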
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
index 38c2b794d..a7f1fc116 100644
--- a/roles/openshift_node/tasks/aws.yml
+++ b/roles/openshift_node/tasks/aws.yml
@@ -1,7 +1,7 @@
---
- name: Configure AWS Cloud Provider Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 741a2234f..5d66de0a3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,6 +2,10 @@
- name: Install the systemd units
include_tasks: systemd_units.yml
+- name: Pull container images
+ include_tasks: container_images.yml
+ when: openshift.common.is_containerized | bool
+
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -38,7 +42,7 @@
- name: Configure Node Environment Variables
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
@@ -76,7 +80,7 @@
- name: Start and enable node dep
systemd:
daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
+ name: "{{ openshift_service_type }}-node-dep"
enabled: yes
state: started
@@ -84,7 +88,7 @@
block:
- name: Start and enable node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
enabled: yes
state: started
daemon_reload: yes
@@ -95,7 +99,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 527580481..ebc1426d3 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
index d60794305..7ddd319d2 100644
--- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Proxy Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
index ee91a88ab..9f1145d12 100644
--- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node dependencies docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service"
src: openshift.docker.node.dep.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..649fc5f6b 100644
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: openshift.docker.node.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml
new file mode 100644
index 000000000..0b8c806ae
--- /dev/null
+++ b/roles/openshift_node/tasks/container_images.yml
@@ -0,0 +1,20 @@
+---
+- name: Install Node system container
+ include_tasks: node_system_container.yml
+ when:
+ - l_is_node_system_container | bool
+
+- name: Install OpenvSwitch system containers
+ include_tasks: openvswitch_system_container.yml
+ when:
+ - openshift_node_use_openshift_sdn | bool
+ - l_is_openvswitch_system_container | bool
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when:
+ - openshift_node_use_openshift_sdn | bool
+ - not l_is_openvswitch_system_container | bool
diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml
index f210a3a21..31ca46ec0 100644
--- a/roles/openshift_node/tasks/dnsmasq.yml
+++ b/roles/openshift_node/tasks/dnsmasq.yml
@@ -1,43 +1,4 @@
---
-- name: Check for NetworkManager service
- command: >
- systemctl show NetworkManager
- register: nm_show
- changed_when: false
- ignore_errors: True
-
-- name: Set fact using_network_manager
- set_fact:
- network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
-
-- name: Install dnsmasq
- package: name=dnsmasq state=installed
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-- name: ensure origin/node directory exists
- file:
- state: directory
- path: "{{ item }}"
- owner: root
- group: root
- mode: '0700'
- with_items:
- - /etc/origin
- - /etc/origin/node
-
-# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
-# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
-# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
-# newer we can use --server-file option to update the servers dynamically and
-# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
-# triggers a restart of dnsmasq but not a node restart.
-- name: Install node-dnsmasq.conf
- template:
- src: node-dnsmasq.conf.j2
- dest: /etc/origin/node/node-dnsmasq.conf
-
- name: Install dnsmasq configuration
template:
src: origin-dns.conf.j2
@@ -63,7 +24,3 @@
# Dynamic NetworkManager based dispatcher
- include_tasks: dnsmasq/network-manager.yml
when: network_manager_active | bool
-
-# Relies on ansible in order to configure static config
-- include_tasks: dnsmasq/no-network-manager.yml
- when: not network_manager_active | bool
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
new file mode 100644
index 000000000..9f66bf12d
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -0,0 +1,43 @@
+---
+- name: Check for NetworkManager service
+ command: >
+ systemctl show NetworkManager
+ register: nm_show
+ changed_when: false
+ ignore_errors: True
+
+- name: Set fact using_network_manager
+ set_fact:
+ network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
+
+- name: Install dnsmasq
+ package: name=dnsmasq state=installed
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+- name: ensure origin/node directory exists
+ file:
+ state: directory
+ path: "{{ item }}"
+ owner: root
+ group: root
+ mode: '0700'
+ with_items:
+ - /etc/origin
+ - /etc/origin/node
+
+# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
+# when the node stops. A dbus message is sent to dnsmasq to add the same entries
+# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
+# newer we can use the --server-file option to update the servers dynamically and
+# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
+# triggers a restart of dnsmasq but not a node restart.
+- name: Install node-dnsmasq.conf
+ template:
+ src: node-dnsmasq.conf.j2
+ dest: /etc/origin/node/node-dnsmasq.conf
+
+# Relies on ansible in order to configure static config
+- include_tasks: dnsmasq/no-network-manager.yml
+ when: not network_manager_active | bool
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
deleted file mode 100644
index d743d2188..000000000
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# input variables:
-# - openshift.common.service_type
-# - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
-# - docker_version
-# - skip_docker_restart
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
-
-- debug: var=docker_image_count.stdout
-
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
- script: nuke_images.sh
- register: nuke_images_result
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- service:
- name: docker
- state: stopped
- register: l_openshift_node_upgrade_docker_stop_result
- until: not l_openshift_node_upgrade_docker_stop_result | failed
- retries: 3
- delay: 30
-
-- name: Upgrade Docker
- package: name=docker{{ '-' + docker_version }} state=present
- register: result
- until: result | success
-
-# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1ed4a05c1..f93aed246 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,14 +3,14 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
until: result | success
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index ae4916761..946deb4d3 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,6 +6,7 @@
- deployment_type == 'openshift-enterprise'
- not openshift_use_crio
+- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
- name: setup firewall
@@ -50,6 +51,8 @@
enabled: yes
state: restarted
when: openshift_use_crio
+ register: task_result
+ failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index eb8d9a6a5..98978ec6f 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull node system container image
command: >
@@ -12,10 +8,10 @@
- name: Install or Update node system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index d33e172c1..b61bc84c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -7,11 +7,6 @@
l_service_name: "{{ openshift_docker_service_name }}"
when: not openshift_use_crio
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 397e1ba18..262ee698b 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,7 +1,7 @@
---
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
@@ -16,29 +16,10 @@
- name: include ovs service environment file
include_tasks: config/install-ovs-service-env-file.yml
- - name: Install Node system container
- include_tasks: node_system_container.yml
- when:
- - l_is_node_system_container | bool
-
- - name: Install OpenvSwitch system containers
- include_tasks: openvswitch_system_container.yml
+ - include_tasks: config/install-ovs-docker-service-file.yml
when:
- openshift_node_use_openshift_sdn | bool
- - l_is_openvswitch_system_container | bool
-
-- block:
- - name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include_tasks: config/install-ovs-docker-service-file.yml
- when:
- - openshift.common.is_containerized | bool
- - openshift_node_use_openshift_sdn | bool
- - not l_is_openvswitch_system_container | bool
+ - not l_is_openvswitch_system_container | bool
- include_tasks: config/configure-node-settings.yml
- include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 561b56918..87556533a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -10,161 +10,29 @@
# tasks file for openshift_node_upgrade
-- include_tasks: registry_auth.yml
+- name: stop services for upgrade
+ include_tasks: upgrade/stop_services.yml
-- name: Stop node and openvswitch services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-node"
- - openvswitch
- failed_when: false
-
-- name: Stop additional containerized services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
- - etcd_container
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when:
- - openshift.common.is_containerized | bool
- - openshift_use_openshift_sdn | bool
-
-- include_tasks: docker/upgrade.yml
- vars:
- # We will restart Docker ourselves after everything is ready:
- skip_docker_restart: True
+# Ensure we actually install the latest package.
+- name: download docker upgrade rpm
+ command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
+ register: result
+ until: result | success
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
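The `-C` (cache-only) flag installs from packages already present in the local cache, pairing with the `--downloadonly` pre-pull used for the node and openvswitch rpms further down. A hedged sketch of the two-phase pattern on its own (the package spec is illustrative):
```
# Hypothetical two-phase upgrade: fetch the rpm while services are still up,
# then install it from the local cache during the outage window.
- name: Download the package without installing it
  command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker-1.13.1"
  register: result
  until: result | success

- name: Install the previously downloaded package from the cache
  command: "{{ ansible_pkg_mgr }} install -C -y docker-1.13.1"
  register: result
  until: result | success
```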
-- include_tasks: "{{ node_config_hook }}"
- when: node_config_hook is defined
-
-- include_tasks: upgrade/rpm_upgrade.yml
+- name: install pre-pulled rpms.
+ include_tasks: upgrade/rpm_upgrade_install.yml
vars:
- component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
when: not openshift.common.is_containerized | bool
-- name: Remove obsolete docker-sdn-ovs.conf
- file:
- path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
- state: absent
-
-- include_tasks: upgrade/containerized_node_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Ensure containerized services stopped before Docker restart
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Stop rpm based services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-node"
- - openvswitch
- failed_when: false
- when: not openshift.common.is_containerized | bool
-
-# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
-- name: Clean up dockershim data
- file:
- path: "/var/lib/dockershim/sandbox/"
- state: absent
-- name: Upgrade openvswitch
- package:
- name: openvswitch
- state: latest
- when: not openshift.common.is_containerized | bool
- register: result
- until: result | success
-
-- name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/node/node-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_node) }}"
- when: oreg_url is defined or oreg_url_node is defined
-
-# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
-- name: Check for swap usage
- command: grep "^[^#].*swap" /etc/fstab
- # grep: match any lines which don't begin with '#' and contain 'swap'
- changed_when: false
- failed_when: false
- register: swap_result
-
- # Disable Swap Block
-- block:
-
- - name: Disable swap
- command: swapoff --all
-
- - name: Remove swap entries from /etc/fstab
- replace:
- dest: /etc/fstab
- regexp: '(^[^#].*swap.*)'
- replace: '# \1'
- backup: yes
-
- - name: Add notice about disabling swap
- lineinfile:
- dest: /etc/fstab
- line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
- state: present
-
- when:
- - swap_result.stdout_lines | length > 0
- - openshift_disable_swap | default(true) | bool
- # End Disable Swap Block
-
-- name: Reset selinux context
- command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
- when:
- - ansible_selinux is defined
- - ansible_selinux.status == 'enabled'
+- include_tasks: "{{ node_config_hook }}"
+ when: node_config_hook is defined
-- name: Apply 3.6 dns config changes
- yedit:
- src: /etc/origin/node/node-config.yaml
- key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_items:
- - key: "dnsBindAddress"
- value: "127.0.0.1:53"
- - key: "dnsRecursiveResolvConf"
- value: "/etc/origin/node/resolv.conf"
+- include_tasks: upgrade/config_changes.yml
# Restart all services
- include_tasks: upgrade/restart.yml
@@ -181,4 +49,7 @@
retries: 24
delay: 5
+- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
+
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
new file mode 100644
index 000000000..e22018e6d
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -0,0 +1,77 @@
+---
+- name: Update systemd units
+ include_tasks: ../systemd_units.yml
+ when: openshift.common.is_containerized
+
+- name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
+
+- name: Remove obsolete docker-sdn-ovs.conf
+ file:
+ path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+ state: absent
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+ file:
+ path: "/var/lib/dockershim/sandbox/"
+ state: absent
+
+# Disable Swap Block (pre)
+- block:
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ - name: Disable swap
+ command: swapoff --all
+
+ when:
+ - openshift_node_upgrade_swap_result | default(False) | bool
+ - openshift_disable_swap | default(true) | bool
+# End Disable Swap Block
+
+- name: Apply 3.6 dns config changes
+ yedit:
+ src: /etc/origin/node/node-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: "dnsBindAddress"
+ value: "127.0.0.1:53"
+ - key: "dnsRecursiveResolvConf"
+ value: "/etc/origin/node/resolv.conf"
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
+ src: "node.service.j2"
+ register: l_node_unit
+
+- name: Reset selinux context
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux.status == 'enabled'
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
deleted file mode 100644
index 245de60a7..000000000
--- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include_tasks: ../systemd_units.yml
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
new file mode 100644
index 000000000..71f00dcd2
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
@@ -0,0 +1,15 @@
+---
+- name: Pre-pull node image
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift_use_openshift_sdn | bool
+
+- include_tasks: ../container_images.yml
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 3f1abceab..717cfa712 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
@@ -13,6 +13,15 @@
- name: Reload systemd to ensure latest unit files
command: systemctl daemon-reload
+- name: Restart support services
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: True
+ with_items:
+ - NetworkManager
+ - dnsmasq
+
- name: Restart container runtime
service:
name: "{{ openshift_docker_service_name }}"
@@ -27,9 +36,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- name: Wait for master API to come back online
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index fcbe1a598..d2864e6b8 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -1,33 +1,24 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - component
# - openshift_pkg_version
# - openshift.common.is_atomic
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+# Pre-pull new node rpm, but don't install
+- name: download new node packages
+ command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}"
register: result
until: result | success
+ vars:
+ openshift_node_upgrade_rpm_list:
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "PyYAML"
+ - "dnsmasq"
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+# Pre-pull the rpms for openvswitch, but don't install
+# openvswitch requires the latest version to be installed.
+- name: download openvswitch upgrade rpm
+ command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch"
register: result
until: result | success
-
-- name: Install Node service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- register: l_node_unit
-
-# NOTE: This is needed to make sure we are using the correct set
-# of systemd unit files. The RPMs lay down defaults but
-# the install/upgrade may override them in /etc/systemd/system/.
-# NOTE: We don't use the systemd module as some versions of the module
-# require a service to be part of the call.
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: l_node_unit | changed
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
new file mode 100644
index 000000000..6390be558
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -0,0 +1,19 @@
+---
+# input variables:
+# - openshift_service_type
+# - component
+# - openshift_pkg_version
+# - openshift.common.is_atomic
+
+# Install the pre-pulled RPM
+# Note: dnsmasq is covered in its own play. openvswitch is included here
+# because once we have the latest rpm downloaded, it will happily be installed.
+- name: install pre-pulled node packages
+ command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}"
+ register: result
+ until: result | success
+ vars:
+ openshift_node_upgrade_rpm_list:
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "PyYAML"
+ - "openvswitch"
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
new file mode 100644
index 000000000..bbf1c5f25
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -0,0 +1,43 @@
+---
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift_service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Ensure containerized services stopped before Docker restart
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- service:
+ name: docker
+ state: stopped
+ register: l_openshift_node_upgrade_docker_stop_result
+ until: not l_openshift_node_upgrade_docker_stop_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- name: Stop rpm based services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift_service_type }}-node"
+ - openvswitch
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
new file mode 100644
index 000000000..3346b7c65
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -0,0 +1,56 @@
+---
+# This is a hack to allow us to update various components without restarting
+# services. This will persist into the upgrade play as well, so everything
+# needs to be restarted by hand.
+- set_fact:
+ skip_node_svc_handlers: True
+
+- include_tasks: registry_auth.yml
+
+- name: Update package metadata to speed up later installs
+ command: "{{ ansible_pkg_mgr }} makecache"
+ register: result
+ until: result | success
+ when: not openshift.common.is_containerized | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- debug: var=docker_image_count.stdout
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- include_tasks: upgrade/containerized_upgrade_pull.yml
+ when: openshift.common.is_containerized | bool
+
+# Prepull the rpms for docker upgrade, but don't install
+- name: download docker upgrade rpm
+ command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}"
+ register: result
+ until: result | success
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- include_tasks: upgrade/rpm_upgrade.yml
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+# Set this fact here so we can use it during the next play, which is serial.
+- name: set_fact swap_result
+ set_fact:
+ openshift_node_upgrade_swap_result: "{{ swap_result.stdout_lines | length > 0 | bool }}"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 16fdde02e..261cac6f1 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -20,9 +20,9 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
container-runtime:
- remote
container-runtime-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
image-service-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
node-labels:
- router=true
- registry=true
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 5964ac095..8b43beb07 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,11 +1,11 @@
[Unit]
Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
+PartOf={{ openshift_service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
+SyslogIdentifier={{ openshift_service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b33ca542..b174c7023 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,5 +1,5 @@
[Unit]
-After={{ openshift.common.service_type }}-master.service
+After={{ openshift_service_type }}-master.service
After={{ openshift_docker_service_name }}.service
After=openvswitch.service
PartOf={{ openshift_docker_service_name }}.service
@@ -10,20 +10,20 @@ PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
+Wants={{ openshift_service_type }}-master.service
+Requires={{ openshift_service_type }}-node-dep.service
+After={{ openshift_service_type }}-node-dep.service
Requires=dnsmasq.service
After=dnsmasq.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
-v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
@@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 929b76f54..65a647b8f 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -44,6 +44,9 @@ openshift_openstack_container_storage_setup:
# populate-dns
openshift_openstack_dns_records_add: []
+openshift_openstack_public_hostname_suffix: ""
+openshift_openstack_private_hostname_suffix: ""
+openshift_openstack_public_dns_domain: "example.com"
openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
openshift_openstack_app_subdomain: "apps"
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index eae4967f7..cf2ead5c3 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -1,7 +1,7 @@
---
- name: "Generate list of private A records"
set_fact:
- private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}"
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
- name: "Add wildcard records to the private A records for infrahosts"
@@ -48,7 +48,7 @@
- name: "Generate list of public A records"
set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}"
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
when: hostvars[item]['public_v4'] is defined
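For clarity, here is a minimal sketch of the record entries the suffix-aware `set_fact` tasks above would build, assuming a single host named `app-node-0`, a private suffix of `.int`, an empty public suffix, and hypothetical addresses:

```
private_records:
  - type: A
    hostname: app-node-0.int
    ip: 192.0.2.10
public_records:
  - type: A
    hostname: app-node-0
    ip: 203.0.113.10
```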
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index f1eca1da6..1ebeacabf 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -38,11 +38,13 @@ openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-bl
Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
```
openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
+openshift_prometheus_<COMPONENT>_storage_class: <VALUE>
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g
```
openshift_prometheus_storage_type: pvc
+openshift_prometheus_storage_class: glusterfs-storage
openshift_prometheus_alertmanager_pvc_name: alertmanager
openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index df331a4bb..e30108d2c 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -23,6 +23,7 @@ openshift_prometheus_pvc_name: prometheus
openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
+openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertmanager_storage_type: "emptydir"
@@ -30,6 +31,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
+openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertbuffer_storage_type: "emptydir"
@@ -37,6 +39,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
+openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}"
# container resources
openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index ad15dc65f..abc5dd476 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -131,6 +131,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_sc_name }}"
when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
@@ -140,6 +141,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}"
when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
@@ -149,6 +151,7 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}"
when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
# prometheus configmap
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 5e7bde1e1..83954eaf8 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,6 +37,13 @@
- when: r_openshift_repos_has_run is not defined
block:
+ - include_tasks: rhel_repos.yml
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - (rhel_subscription_user or rhsub_user) is defined
+ - (rhel_subscription_password or rhsub_pass) is defined
+
- include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml
new file mode 100644
index 000000000..c384cbe9a
--- /dev/null
+++ b/roles/openshift_repos/tasks/rhel_repos.yml
@@ -0,0 +1,34 @@
+---
+- name: Ensure RHEL rhui repositories are disabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'"
+ register: repo_rhui
+ changed_when: "repo_rhui.rc != 1"
+ failed_when: repo_rhui.rc == 11
+
+- name: Disable RHEL rhui repositories
+ command: bash -c "yum-config-manager \
+ --disable 'rhui-REGION-client-config-server-7' \
+ --disable 'rhui-REGION-rhel-server-rh-common' \
+ --disable 'rhui-REGION-rhel-server-releases' \
+ --disable 'rhui-REGION-client-config-server-7'"
+ when: repo_rhui.changed
+
+- name: Ensure RHEL repositories are enabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l"
+ register: repo_rhel
+ changed_when: "'4' not in repo_rhel.stdout"
+ failed_when: repo_rhel.rc == 11
+
+- name: Disable all repositories
+ command: bash -c "subscription-manager repos --disable='*'"
+ when: repo_rhel.changed
+
+- name: Enable RHEL repositories
+ command: subscription-manager repos \
+ --enable="rhel-7-server-rpms" \
+ --enable="rhel-7-server-extras-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
+ --enable="rhel-7-fast-datapath-rpms"
+ register: subscribe_repos
+ until: subscribe_repos | succeeded
+ when: repo_rhel.changed
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index be749a2e1..f7bd58db3 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as the default StorageClass
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -125,13 +126,14 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for the registry GlusterFS cluster as the default StorageClass
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
@@ -147,7 +149,6 @@ Dependencies
------------
* os_firewall
-* openshift_hosted_facts
* openshift_repos
* lib_openshift
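As a usage sketch for the new flag documented above, an inventory might mark the main cluster's StorageClass as the default while leaving the registry cluster untouched (illustrative values only; everything else keeps the role defaults):

```
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_storageclass_default: True
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_storageclass_default: False
```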
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index b7b3c0db2..da34fab2a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
+openshift_storage_glusterfs_storageclass_default: False
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
@@ -51,6 +52,7 @@ openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_storageclass_default: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 0cdd33880..6a4ef942b 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 73b9791eb..2ea7286f3 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..0c2fcb2c5 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -23,7 +23,7 @@
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: glusterfs_wipe
+ when: "'openshift' in hostvars[item] and glusterfs_wipe"
- name: Delete pre-existing GlusterFS config
file:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 7466702b8..b7cff6514 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
index 454e84aaf..5ea17428f 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 98f7c317e..d61e6873a 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,4 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_os_firewall
-- role: openshift_hosted_facts
+- role: openshift_facts
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..354699637 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,10 @@
---
openshift_protect_installed_version: True
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+openshift_use_crio_only: False
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 5d7683120..d0ad4b7d2 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 4f9158ade..ae0f68a5b 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -101,13 +101,13 @@
when: is_containerized | bool
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
@@ -196,7 +196,7 @@
- openshift_version.startswith(openshift_release) | bool
msg: |-
You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
on host {{ inventory_hostname }}.
We will only install the latest RPMs, so please ensure you are getting the release
you expect. You may need to adjust your Ansible inventory, modify the repositories
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c40777bf1..c7ca5ceae 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -8,14 +8,14 @@
- openshift_version is not defined
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
diff --git a/roles/rhel_subscribe/README.md b/roles/rhel_subscribe/README.md
new file mode 100644
index 000000000..15eaf4f30
--- /dev/null
+++ b/roles/rhel_subscribe/README.md
@@ -0,0 +1,29 @@
+RHEL Subscribe
+==============
+
+Subscribes the RHEL servers and adds the OpenShift Enterprise repositories.
+
+Role variables
+--------------
+
+### `rhsub_user`
+
+Username for the subscription-manager.
+
+### `rhsub_pass`
+
+Password for the subscription-manager.
+
+### `rhsub_pool`
+
+Name of the pool to attach (optional).
+
+### `rhsub_server`
+
+Custom hostname for the Satellite server (optional).
+
+### `openshift_release`
+
+Version for the OpenShift Enterprise repositories.
+
+Example: `3.6`
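As a usage sketch, the variables above might be supplied through the inventory as follows (credentials and hostnames are hypothetical placeholders; the pool name shown is the role default):

```
rhsub_user: 'subscription-user'
rhsub_pass: 'subscription-password'
rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
rhsub_server: 'satellite.example.com'
openshift_release: '3.6'
```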
diff --git a/roles/rhel_subscribe/defaults/main.yml b/roles/rhel_subscribe/defaults/main.yml
new file mode 100644
index 000000000..80b2ab919
--- /dev/null
+++ b/roles/rhel_subscribe/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
deleted file mode 100644
index fa74c9953..000000000
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- set_fact:
- default_ose_version: '3.6'
- when: deployment_type == 'openshift-enterprise'
-
-- set_fact:
- ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
-
-- fail:
- msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
- when:
- - deployment_type == 'openshift-enterprise'
- - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
-
-- name: Enable RHEL repositories
- command: subscription-manager repos \
- --enable="rhel-7-server-rpms" \
- --enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
- --enable="rhel-7-fast-datapath-rpms"
- register: subscribe_repos
- until: subscribe_repos | succeeded
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index f83cf9157..74ee8bbfe 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -1,12 +1,8 @@
---
-# TODO: Enhance redhat_subscription module
-# to make it able to attach to a pool
-# to make it able to enable repositories
-
- set_fact:
+ rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
- rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
- fail:
@@ -14,23 +10,12 @@
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
- when: rhel_subscription_user is not defined
+ msg: The rhel_subscription_user variable is required for this role.
+ when: rhel_subscription_user is not defined and rhsub_user is not defined
- fail:
- msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
- when: rhel_subscription_pass is not defined
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
-- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
- args:
- creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhel_subscription_server is defined and rhel_subscription_server
+ msg: The rhel_subscription_pass variable is required for this role.
+ when: rhel_subscription_pass is not defined and rhsub_pass is not defined
- name: Install Red Hat Subscription manager
yum:
@@ -39,37 +24,58 @@
register: result
until: result | success
-- name: RedHat subscriptions
+- name: Is host already registered?
+ command: bash -c "subscription-manager version"
+ register: rh_subscribed
+ changed_when: "'not registered' in rh_subscribed.stdout"
+ ignore_errors: yes
+
+- name: Register host
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
register: rh_subscription
until: rh_subscription | succeeded
+ when:
+ - "'not registered' in rh_subscribed.stdout"
+ - rhel_subscription_user is defined
+ - rhel_subscription_pass is defined
-- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
- register: openshift_pool_id
- until: openshift_pool_id | succeeded
- changed_when: False
+- fail:
+ msg: 'Unable to register host with Red Hat Subscription Manager'
+ when:
+ - "'not registered' in rh_subscribed.stdout"
+ - rh_subscription.failed
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: bash -c "subscription-manager list --consumed --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}"
register: openshift_pool_attached
- until: openshift_pool_attached | succeeded
- changed_when: False
- when: openshift_pool_id.stdout == ''
+ changed_when: rhel_subscription_pool not in openshift_pool_attached.stdout
+ failed_when: openshift_pool_attached.rc == 2
+ ignore_errors: yes
+
+- name: Retrieve the OpenShift Pool ID
+ command: bash -c "subscription-manager list --available --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}"
+ register: openshift_pool_retrieve
+ changed_when: rhel_subscription_pool in openshift_pool_retrieve.stdout
+ when: rhel_subscription_pool not in openshift_pool_attached.stdout
+ ignore_errors: yes
- fail:
- msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available or consumed pools"
- when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
+ msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available pools"
+ when:
+ - rhel_subscription_pool not in openshift_pool_attached.stdout
+ - rhel_subscription_pool not in openshift_pool_retrieve.stdout
- name: Attach to OpenShift Pool
- command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
- register: subscribe_pool
- until: subscribe_pool | succeeded
- when: openshift_pool_id.stdout != ''
+ command: bash -c "subscription-manager attach --pool {{ rhel_subscription_pool }}"
+ register: openshift_pool_attached
+ changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout"
+ when: rhel_subscription_pool not in openshift_pool_attached.stdout
-- include_tasks: enterprise.yml
+- include_role:
+ role: rhel_subscribe
+ tasks_from: satellite
when:
- - deployment_type == 'openshift-enterprise'
- - not ostree_booted.stat.exists | bool
+ - (rhel_subscription_server or rhsub_server) is defined
+ - (rhel_subscription_server or rhsub_server)
diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml
new file mode 100644
index 000000000..b2b2a621d
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/satellite.yml
@@ -0,0 +1,5 @@
+---
+- name: Satellite preparation
+ command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ args:
+ creates: /etc/rhsm/ca/katello-server-ca.pem
diff --git a/setup.py b/setup.py
index 5bf48b5ad..5ba050b83 100644
--- a/setup.py
+++ b/setup.py
@@ -334,9 +334,9 @@ class OpenShiftAnsibleSyntaxCheck(Command):
result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
has_errors = result or has_errors
- # TODO (rteague): This test will be enabled once we move to Ansible 2.4
- # result = self.deprecate_include(yaml_contents, yaml_file)
- # has_errors = result or has_errors
+ # Check for usage of include: directive
+ result = self.deprecate_include(yaml_contents, yaml_file)
+ has_errors = result or has_errors
if not has_errors:
print('...PASSED')
@@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command):
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- # Error on any entry points in 'common'
- if 'common' in playbook:
- print('{}Invalid entry point playbook. All playbooks must'
- ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
- has_errors = True
# --syntax-check each entry point playbook
- else:
- try:
- # Create a host group list to avoid WARNING on unmatched host patterns
- host_group_list = [
- 'etcd,masters,nodes,OSEv3',
- 'oo_all_hosts',
- 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
- 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
- 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
- 'oo_nodes_to_config,oo_nodes_to_upgrade',
- 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
- 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
- 'oo_lb_to_config',
- 'oo_nfs_to_config',
- 'glusterfs,glusterfs_registry,']
- subprocess.check_output(
- ['ansible-playbook', '-i ' + ','.join(host_group_list),
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
- has_errors = True
+ try:
+ # Create a host group list to avoid WARNING on unmatched host patterns
+ host_group_list = [
+ 'etcd,masters,nodes,OSEv3',
+ 'oo_all_hosts',
+ 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
+ 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
+ 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
+ 'oo_nodes_to_config,oo_nodes_to_upgrade',
+ 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
+ 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
+ 'oo_lb_to_config',
+ 'oo_nfs_to_config',
+ 'glusterfs,glusterfs_registry,']
+ subprocess.check_output(
+ ['ansible-playbook', '-i ' + ','.join(host_group_list),
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
if has_errors:
raise SystemExit(1)
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
index 74a55fa51..74a55fa51 100755..100644
--- a/test/integration/build-images.sh
+++ b/test/integration/build-images.sh
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
index 9875de9aa..006a71bd9 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
index 16ff41673..b4f18e3b5 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
index 9f3aad7bd..7998023ae 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- action: openshift_health_check
@@ -22,4 +22,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
index 84e9360f5..3b8b15ff3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Break the break-yum repo
@@ -29,4 +29,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
index f4c1bedfa..269c0250b 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -19,4 +19,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
index 409057792..92408a669 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Remove the local repo entirely
@@ -25,4 +25,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index d88f82a4a..4e2b8a50c 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
index 401ad1e21..e1f8d74e6 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -16,14 +16,14 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
# test with wrong repo enabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
- action: openshift_health_check
args:
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
index 88613802b..600bbe9c3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,11 +14,11 @@
- block:
# enable repo with extra minor version available
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -26,4 +26,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
index da3f6b844..079ca4253 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index e3459b376..0f0f8d366 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -46,16 +46,15 @@
- hosts: all
tasks:
-
# run before openshift_version to prevent it breaking
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2" }
-- include: ../../../playbooks/init/main.yml
+- import_playbook: ../../../playbooks/init/main.yml
- hosts: all
tasks:
# put it back like it was for the tests
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", enabled: False }
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
index 680b64602..680b64602 100755..100644
--- a/test/integration/run-tests.sh
+++ b/test/integration/run-tests.sh
diff --git a/tox.ini b/tox.ini
index 46738cae5..9c35915d5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,6 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
- integration
skipsdist=True
skip_missing_interpreters=True
@@ -14,7 +13,6 @@ deps =
-rtest-requirements.txt
unit: -eutils
py35-flake8: flake8-bugbear==17.3.0
- integration: docker-py==1.10.6
commands =
unit: pytest {posargs}
@@ -23,4 +21,3 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
ansible_syntax: python setup.py ansible_syntax
- integration: python -c 'print("run test/integration/run-tests.sh")'